Example usage for org.apache.commons.lang.time StopWatch reset

List of usage examples for org.apache.commons.lang.time StopWatch reset

Introduction

On this page you can find example usages of org.apache.commons.lang.time StopWatch reset, taken from open-source projects.

Prototype

public void reset() 

Document

Resets the stopwatch.
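
A minimal sketch of the start/stop/reset cycle (the Thread.sleep calls are placeholders for real work, and commons-lang 2.x on the classpath is assumed):

import org.apache.commons.lang.time.StopWatch;

public class StopWatchResetExample {
    public static void main(String[] args) throws InterruptedException {
        StopWatch sw = new StopWatch();

        sw.start();
        Thread.sleep(100); // placeholder for the first piece of timed work
        sw.stop();
        System.out.println("first run:  " + sw.toString());

        // reset() clears the elapsed time so the same instance can be reused;
        // without it, calling start() again after stop() throws IllegalStateException
        sw.reset();
        sw.start();
        Thread.sleep(200); // placeholder for the second piece of timed work
        sw.stop();
        System.out.println("second run: " + sw.toString());
    }
}

This reset-before-restart pattern is what the loops in the examples below rely on.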

Usage

From source file:org.openmrs.module.emrmonitor.api.impl.EmrMonitorServiceImpl.java

@Override
public EmrMonitorReport generateEmrMonitorReport() {
    log.debug("Generating EMR monitor report");

    EmrMonitorServer localServer = ensureLocalServer();
    EmrMonitorReport report = new EmrMonitorReport();
    report.setServer(localServer);
    report.setDateCreated(new Date());
    report.setStatus(EmrMonitorReport.SubmissionStatus.WAITING_TO_SEND);

    List<String> disabledProducers = EmrMonitorConfig.getDisabledMetricProducers();

    StopWatch sw = new StopWatch();
    for (MetricProducer metricProducer : Context.getRegisteredComponents(MetricProducer.class)) {
        if (metricProducer.isEnabled()) {
            String namespace = metricProducer.getNamespace();
            if (!disabledProducers.contains(namespace)) {
                log.debug("Generating metrics for " + namespace + " ("
                        + metricProducer.getClass().getSimpleName() + ")");
                sw.start();
                Map<String, String> metrics = metricProducer.produceMetrics();
                if (metrics != null) {
                    for (String metricName : metrics.keySet()) {
                        String metricValue = metrics.get(metricName);
                        report.setMetric(namespace + "." + metricName, metricValue);
                        log.debug(metricName + ": " + metricValue);
                    }
                }
                sw.stop();
                log.debug(namespace + " metrics generated in: " + sw.toString());
                sw.reset();
            }
        }
    }

    return saveEmrMonitorReport(report);
}

From source file:org.openmrs.module.reporting.data.patient.service.PatientDataServiceImpl.java

@Override
protected Evaluated<PatientDataDefinition> executeEvaluator(
        DefinitionEvaluator<PatientDataDefinition> evaluator, PatientDataDefinition definition,
        EvaluationContext context) throws EvaluationException {

    EvaluatedPatientData ret = new EvaluatedPatientData(definition, context);
    int batchSize = ReportingConstants.GLOBAL_PROPERTY_DATA_EVALUATION_BATCH_SIZE();

    // Do not evaluate in batches if no base cohort is supplied, or no batch size is specified
    if (context.getBaseCohort() == null || batchSize <= 0 || context.getBaseCohort().size() <= batchSize) {
        return super.executeEvaluator(evaluator, definition, context);
    }

    if (context.getBaseCohort().size() > 0) {

        List<Cohort> batches = new ArrayList<Cohort>();
        List<Integer> ids = new ArrayList<Integer>(context.getBaseCohort().getMemberIds());
        for (int i = 0; i < ids.size(); i += batchSize) {
            batches.add(new Cohort(ids.subList(i, i + Math.min(batchSize, ids.size() - i))));
        }
        log.info("Number of batches to execute: " + batches.size());

        // Evaluate each batch
        for (Cohort batchCohort : batches) {
            EvaluationContext batchContext = context.shallowCopy();
            batchContext.setBaseCohort(batchCohort);
            batchContext.clearCache(); // Setting base cohort should do this, but just to be sure

            StopWatch timer = new StopWatch();
            timer.start();

            EvaluatedPatientData batchData = (EvaluatedPatientData) super.executeEvaluator(evaluator,
                    definition, batchContext);
            ret.getData().putAll(batchData.getData());

            timer.stop();
            log.debug("Evaluated batch: " + timer.toString());
            log.debug("Number of running data evaluated: " + ret.getData().size());

            timer.reset();

            Context.flushSession();
            Context.clearSession();
        }
    }
    return ret;
}

From source file:org.sonatype.nexus.integrationtests.nexus748.Nexus748MultipleStart.java

@Test
public void multipleStartTest() throws Exception {

    StopWatch stopWatch = new StopWatch();

    NexusClient client = (NexusClient) getITPlexusContainer().lookup(NexusClient.ROLE);
    TestContext context = TestContainer.getInstance().getTestContext();
    client.connect(AbstractNexusIntegrationTest.nexusBaseUrl, context.getAdminUsername(),
            context.getAdminPassword());

    // enable security
    getNexusConfigUtil().enableSecurity(true);

    List<Long> startTimes = new ArrayList<Long>();

    for (int ii = 0; ii < 10; ii++) {
        // start the timer
        stopWatch.reset();
        stopWatch.start();

        // start
        getNexusStatusUtil().start(getTestId());

        Assert.assertTrue(client.isNexusStarted(true));

        // get the time
        stopWatch.stop();

        // stop
        getNexusStatusUtil().stop();

        startTimes.add(stopWatch.getTime());
    }

    logger.info("\n\n**************\n Start times: \n**************");

    logger.info("Iter\tTime");
    for (int ii = 0; ii < startTimes.size(); ii++) {
        Long startTime = startTimes.get(ii);
        logger.info(" " + (ii + 1) + "\t " + (startTime / 1000.0) + "sec.");

    }

}

From source file:org.trend.hgraph.mapreduce.pagerank.CalculatePageRankReducer.java

@Override
protected void reduce(Text key, Iterable<DoubleWritable> incomingPageRanks, Context context)
        throws IOException, InterruptedException {

    String rowkey = Bytes.toString(key.getBytes()).trim();
    double incomingPageRankSum = 0.0D;
    StopWatch sw = new StopWatch();
    sw.start();
    for (DoubleWritable incomingPageRank : incomingPageRanks) {
        incomingPageRankSum = incomingPageRankSum + incomingPageRank.get();
    }
    // calculate new pageRank here
    double newPageRank = (dampingFactor * incomingPageRankSum) + ((1.0D - dampingFactor) / verticesTotalCnt);
    sw.stop();
    context.getCounter(Counters.CAL_NEW_PR_TIME_CONSUMED).increment(sw.getTime());

    sw.reset();
    sw.start();
    double oldPageRank = Utils.getPageRank(vertexTable, rowkey, Constants.PAGE_RANK_CQ_TMP_NAME);
    if (!pageRankEquals(oldPageRank, newPageRank, pageRankCompareScale)) {
        // collect pageRank changing count with counter
        context.getCounter(Counters.CHANGED_PAGE_RANK_COUNT).increment(1);
    }
    sw.stop();
    context.getCounter(Counters.CMP_OLD_NEW_PR_TIME_CONSUMED).increment(sw.getTime());

    context.write(key, new DoubleWritable(newPageRank));
}

From source file:org.trend.hgraph.VariousTest.java

@Test
@Ignore //for test StopWatch behavior
public void testStopWatch() throws InterruptedException {
    StopWatch timer = new StopWatch();

    // #1
    timer.start();
    Thread.sleep(5000);
    timer.stop();

    System.out.println("timer.toString=" + timer.toString());
    //    System.out.println("timer.toSplitString=" + timer.toSplitString());

    // #2
    timer.reset();
    timer.start();
    Thread.sleep(4000);
    timer.split();
    System.out.println("timer.toSplitString=" + timer.toSplitString());
    Thread.sleep(5000);
    timer.split();
    System.out.println("timer.toSplitString=" + timer.toSplitString());
    timer.unsplit();
    Thread.sleep(6000);
    timer.stop();

    System.out.println("timer.toString=" + timer.toString());

}

From source file:pt.ua.tm.neji.parsing.TestDynamicParsing.java

public void test() throws IOException, NejiException {
    Constants.verbose = true;
    StopWatch timer = new StopWatch();

    // create corpus
    Corpus corpus = new Corpus();
    corpus.setText(Variables.str1.gdep.text);

    // readies the parser
    Variables.parseWithDepParser(ParserLevel.TOKENIZATION, corpus, Variables.str1.gdep.text);

    // test if only tokenization was performed, no dependency features, lemmas, pos or chunks should exist
    timer.start();
    List<Sentence> sentences1 = Variables.parseWithDepParser(ParserLevel.TOKENIZATION, corpus,
            Variables.str1.gdep.text);
    timer.stop();
    logger.info("{}", sentences1.get(0).toExportFormat());
    logger.info("Tokenization took {}", timer.toString());
    timer.reset();

    // test if only lemmatization was performed, no dependency features, pos or chunks should exist
    timer.start();
    List<Sentence> sentences2 = Variables.parseWithDepParser(ParserLevel.LEMMATIZATION, corpus,
            Variables.str1.gdep.text);
    timer.stop();
    logger.info("{}", sentences2.get(0).toExportFormat());
    logger.info("Lemmatization took {}", timer.toString());
    timer.reset();

    // test if only pos was performed, no dependency features nor chunks should exist
    timer.start();
    List<Sentence> sentences3 = Variables.parseWithDepParser(ParserLevel.POS, corpus, Variables.str1.gdep.text);
    timer.stop();
    logger.info("{}", sentences3.get(0).toExportFormat());
    logger.info("POS took {}", timer.toString());
    timer.reset();

    // test if only chunking was performed, no dependency features should exist
    timer.start();
    List<Sentence> sentences4 = Variables.parseWithDepParser(ParserLevel.CHUNKING, corpus,
            Variables.str1.gdep.text);
    timer.stop();
    logger.info("{}", sentences4.get(0).toExportFormat());
    logger.info("Chunking took {}", timer.toString());
    timer.reset();

    // test if dependency parsing was performed
    timer.start();
    List<Sentence> sentences5 = Variables.parseWithDepParser(ParserLevel.DEPENDENCY, corpus,
            Variables.str1.gdep.text);
    timer.stop();
    logger.info("{}", sentences5.get(0).toExportFormat());
    logger.info("Dependency took {}", timer.toString());
    timer.reset();

}

From source file:test.bgp.OldTestDQP.java

public void testLocal() throws EngineException, MalformedURLException, IOException {
    Graph graph = Graph.create();
    QueryProcess exec = QueryProcessDQP.create(graph);

    StopWatch sw = new StopWatch();
    sw.start();
    logger.info("Initializing GraphEngine, entailments: " + graph.getEntailment());
    Load ld = Load.create(graph);
    logger.info("Initialized GraphEngine: " + sw.getTime() + " ms");

    sw.reset();
    sw.start();
    try {
        ld.parseDir(OldTestDQP.class.getClassLoader().getResource("demographie").getPath() + "/cog-2012.ttl");
    } catch (LoadException ex) {
        LogManager.getLogger(OldTestDQP.class.getName()).log(Level.ERROR, "", ex);
    }

    try {
        ld.parseDir(
                OldTestDQP.class.getClassLoader().getResource("demographie").getPath() + "/popleg-2010.ttl");
    } catch (LoadException ex) {
        LogManager.getLogger(OldTestDQP.class.getName()).log(Level.ERROR, "", ex);
    }

    logger.info("Graph size: " + graph.size());

    for (String q : queries) {
        logger.info("Querying with : \n" + q);
        for (int i = 0; i < 10; i++) {
            sw.reset();
            sw.start();
            Mappings results = exec.query(q);
            logger.info(results.size() + " results: " + sw.getTime() + " ms");
        }
    }
}

From source file:test.distribution.ServiceTest.java

@Test
@Ignore
public void serviceDebugTest() throws EngineException, MalformedURLException, IOException {

    String query = "PREFIX idemo:<http://rdf.insee.fr/def/demo#> \n"
            + "PREFIX igeo:<http://rdf.insee.fr/def/geo#> \n" + "SELECT ?region  WHERE { \n"
            + "    SERVICE <http://localhost:9091/kgram/sparql> {\n"
            + "         ?region igeo:codeRegion \"24\" .\n"
            + "         ?region igeo:subdivisionDirecte ?departement .\n" + "    }\n" + "} ";

    //---------------Service grouping-----------------------
    Graph g1 = Graph.create();
    //        execDQP1.addVisitor(new ServiceQueryVisitorPar(execDQP1));
    ProviderImplCostMonitoring sProv = ProviderImplCostMonitoring.create();
    QueryProcessDQP execDQP = QueryProcessDQP.create(g1, sProv, false);

    StopWatch sw = new StopWatch();
    sw.start();
    Mappings maps1 = execDQP.query(query);
    System.out.println(maps1);
    System.out.println("[service] Results size " + maps1.size() + " in " + sw.getTime() + " ms");
    sw.stop();
    sw.reset();
}

From source file:ubc.pavlab.aspiredb.server.service.PhenotypeBrowserServiceImpl.java

private Map<String, PhenotypeSummary> constructPhenotypeSummarys(Collection<Phenotype> phenotypes,
        Collection<Long> subjectIds, Collection<Long> projectIds) throws NeurocartaServiceException {

    Map<String, PhenotypeSummary> phenotypeNameToSummary = new LinkedHashMap<String, PhenotypeSummary>();

    // Checking whether a phenotype is a NeuroPhenoCarta phenotype takes a while, and for large 'special'
    // projects prohibitively so, so disable this for special projects.
    Boolean containsLargeSpecialProject = false;

    Collection<Project> projects = projectDao.load(projectIds);

    for (Project p : projects) {

        if (p.getSpecialData() != null && p.getSpecialData()) {

            containsLargeSpecialProject = true;
            log.info("constructing phenotypeSummaries for 'SPECIAL' project, some data will not be filled");
            break;

        }

    }

    // Make PhenotypeSummaryValueObjects and populate their value counts.
    StopWatch timer = new StopWatch();
    timer.start();

    // collect all possible values and store in memory instead of querying the database
    Map<String, Collection<String>> phenotypeValueMap = new HashMap<>();
    for (Phenotype phenotype : phenotypes) {
        if (!phenotypeValueMap.containsKey(phenotype.getName())) {
            phenotypeValueMap.put(phenotype.getName(), new HashSet<String>());
        }
        phenotypeValueMap.get(phenotype.getName()).add(phenotype.getValue());
    }

    log.info("constructing PhenotypeSummaryValueObject for " + phenotypes.size()
            + " phenotypes, specialproject=" + containsLargeSpecialProject);
    for (Phenotype phenotype : phenotypes) {
        PhenotypeSummary phenotypeSummary = phenotypeNameToSummary.get(phenotype.getName());

        // Create new PhenotypeSummaryValueObject.
        if (phenotypeSummary == null) {

            Collection<String> possibleValues = new ArrayList<String>();

            // all possible values of only large special project are HPO so this database call is unnecessary
            if (!containsLargeSpecialProject) {

                possibleValues = phenotypeValueMap.get(phenotype.getName());

            } else {
                possibleValues.add("1");
                possibleValues.add("0");
            }

            phenotypeSummary = makePhenotypeSummary(phenotype, possibleValues, subjectIds,
                    containsLargeSpecialProject);
            phenotypeNameToSummary.put(phenotype.getName(), phenotypeSummary);
            // FIXME: Anton's temporary hack
            if ((possibleValues.size() == 1 && (possibleValues.contains("1") || possibleValues.contains("0")))
                    || (possibleValues.size() == 2 && possibleValues.contains("1")
                            && possibleValues.contains("0"))) {
                phenotypeSummary.setInferredBinaryType(true);
            }
        }

        addSubjectToPhenotypeCountingSet(phenotypeSummary, phenotype);

    }
    log.info("construction PhenotypeSummaryValueObject for " + phenotypes.size() + " phenotoypes took "
            + timer.getTime() + "ms");

    if (!containsLargeSpecialProject) {
        timer.reset();
        timer.start();

        log.info("initializeInferredPhenotypes for " + phenotypeNameToSummary.values().size()
                + " phenotypesummaryValueobjects");
        for (PhenotypeSummary summary : phenotypeNameToSummary.values()) {
            summary.initializeInferredPhenotypes();
        }

        log.info("initializeInferredPhenotypes took " + timer.getTime() + "ms");
    }

    return phenotypeNameToSummary;
}

From source file:ubic.BAMSandAllen.optimize.GreedyMultiThreaded.java

/**
 * Iteratively remove rows from the B data matrix of the matrix pair, each
 * time increasing the correlation the maximum possible
 *
 * @param iterations
 *            number of iterations to perform
 * @param slow
 *            indicates whether to re-compute regressions for every gene
 *            removal test
 * @throws Exception
 */
public void run(int iterations, boolean slow, boolean keepSign) throws Exception {
    StopWatch watch = new StopWatch();
    watch.start();
    StopWatch smallWatch = new StopWatch();
    long startTime = System.currentTimeMillis();

    double firstBaseLine = pair.getCorrelation(true);
    // make it more negative if it starts below zero
    boolean increase = firstBaseLine > 0;
    // force the increase on random runs with low correlation
    if (Math.abs(firstBaseLine) < 0.1)
        increase = true;

    // if we are going against the current correlation sign - eg go from
    // positive correlation to negative
    if (!keepSign) {
        increase = !increase;
    }

    List<GreedyThreadRunner> runners = new LinkedList<GreedyThreadRunner>();

    // create the runners
    for (int threadInd = 0; threadInd < threads; threadInd++) {
        MatrixPair pairCopy = (MatrixPair) deepCopy(pair);
        runners.add(new GreedyThreadRunner(pairCopy, slow, increase));
    }

    for (int i = 0; i < iterations; i++) {
        smallWatch.reset();
        smallWatch.start();

        ExecutorService pool;
        pool = Executors.newFixedThreadPool(threads);
        // divide up the rows
        List<String> rows = pair.getMatrixBDataRows();
        List<Collection<String>> splits = split(rows, threads);

        double baseLine = pair.getCorrelation(true);

        log.info("Base correlation:" + baseLine + " size:" + pair.getMatrixBDataRows().size());

        // set the baseline and call the runners
        for (int threadInd = 0; threadInd < threads; threadInd++) {
            GreedyThreadRunner runner = runners.get(threadInd);
            runner.setBaseline(baseLine);

            // set residual calculation triangles to match across all
            // threads, if we are doing partial regression
            if (pair instanceof ConnectivityAndAllenPartialExpressionMatrixPair) {
                ConnectivityAndAllenPartialExpressionMatrixPair partialPair = ((ConnectivityAndAllenPartialExpressionMatrixPair) pair);
                // only do it if we are regressing on expression
                if (partialPair
                        .getRegressType() == ConnectivityAndAllenPartialExpressionMatrixPair.RegressMatrix.BOTH
                        && partialPair
                                .getRegressType() == ConnectivityAndAllenPartialExpressionMatrixPair.RegressMatrix.EXPRESSION) {
                    RegressionVector triangles = partialPair.getTrianglesMatrixB();
                    runner.setTrianglesMatrixB(triangles);
                }
            }

            runner.setRowsToTest(splits.get(threadInd));
            // log.info( "Split size:" + splits.get( threadInd ).size() );
            pool.execute(runner);
        }

        // log.info( "Waiting for threads to finish" );
        pool.shutdown();
        pool.awaitTermination(15, TimeUnit.MINUTES);

        // go through all the runners and get results
        double bestIncrease = Double.MAX_VALUE * -1;
        String bestRow = null;
        for (GreedyThreadRunner runner : runners) {
            double diff = runner.getBestIncrease();
            if (!increase) {
                diff *= -1;
            }
            if (diff > bestIncrease) {
                bestRow = runner.getBestRow();
                bestIncrease = diff;
            }
        }

        if (bestRow == null) {
            log.info("No best row found " + pair.getCorrelation(true));
            log.info("Putting remaining " + rows.size() + " genes at end of file");
            for (String row : rows) {
                outputRowToFile(startTime, row);
                FileTools.stringToFile(i + "," + baseLine + "," + row + "\n",
                        new File(SetupParameters.getDataFolder() + "LOOResults." + startTime + ".txt"), true);
            }
            break;
        }

        pair.removeMatrixBDataRowFast(bestRow);
        for (GreedyThreadRunner runner : runners) {
            runner.removeRow(bestRow);
        }

        outputRowToFile(startTime, bestRow);
        // write correlation

        FileTools.stringToFile(i + "," + baseLine + "," + bestRow + "\n",
                new File(SetupParameters.getDataFolder() + "LOOResults." + startTime + ".txt"), true);

        int eta = (int) (smallWatch.getTime() / 1000) / 2 * pair.getMatrixBDataRows().size() / 3600;
        log.info(bestRow + " changes correlation by " + bestIncrease + " time:" + (smallWatch.getTime() / 1000)
                + "s total:" + (watch.getTime() / 1000) + "s estimated hours remaining:" + eta);
    }
    log.info("Start time:" + startTime);
    FileTools.stringToFile(startTime + "\n",
            new File(SetupParameters.getDataFolder() + "Link." + startTime + ".txt"), true);

    // make a link between its output files and start time - awkward hack
    File jarFile = new File(this.getClass().getProtectionDomain().getCodeSource().getLocation().toURI());
    FileTools.stringToFile(jarFile.toString() + "\n",
            new File(SetupParameters.getDataFolder() + "Link." + startTime + ".txt"), true);

}