Example usage for org.apache.commons.lang.time.StopWatch.toString()

Introduction

This page collects usage examples of org.apache.commons.lang.time.StopWatch.toString() from open-source projects.

Prototype

public String toString() 

Document

Gets a summary of the time that the stopwatch recorded as a string.

The format used is ISO 8601-like: hours:minutes:seconds.milliseconds.
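
A minimal sketch of the output (assuming commons-lang 2.x on the classpath; the class name and sleep duration below are illustrative, not taken from the projects listed):

import org.apache.commons.lang.time.StopWatch;

public class StopWatchToStringDemo {
    public static void main(String[] args) throws InterruptedException {
        StopWatch timer = new StopWatch();
        timer.start();
        Thread.sleep(1234); // simulate some work
        timer.stop();
        // prints something like "0:00:01.234" (hours:minutes:seconds.milliseconds)
        System.out.println(timer.toString());
    }
}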

Usage

From source file:pt.ua.tm.neji.batch.FileBatchExecutor.java

@Override
public void run(Class<? extends Processor> processorCls, Context context, Object... args) throws NejiException {
    //        System.setProperty("file.encoding", "UTF-8");

    logger.info("Initializing context...");
    context.initialize();
    logger.info("Installing multi-threading support...");
    context.addMultiThreadingSupport(numThreads);

    //        try {
    //        logger.info("Starting thread pool with support for {} threads...", numThreads);
    //            executor = Executors.newFixedThreadPool(numThreads, new ProcessorThreadFactory());

    StopWatch timer = new StopWatch();
    timer.start();

    //            CorpusDirWalker walker = new CorpusDirWalker(processorCls, context,
    //                    inputWildcardFilter, compressed, storeDocuments, args);
    //
    //        // Store processed corpora
    //            walker.processFiles();
    //
    //            executor.shutdown();
    //            executor.awaitTermination(Long.MAX_VALUE, TimeUnit.DAYS);

    filesProcessed = processFiles(inputFolderPath, inputWildcardFilter, outputFolderPath, numThreads, context,
            processorCls, args);

    logger.info("Stopped thread pool.");

    logger.info("Terminating context...");
    context.terminate();

    timer.stop();
    logger.info("Processed {} files in {}", filesProcessed, timer.toString());
    //        } catch (IOException | InterruptedException ex) {
    //            throw new NejiException("Problem processing pipeline.", ex);
    //        }
}

From source file:pt.ua.tm.neji.evaluation.craft.statistics.FolderBatchExecutor.java

public void run(final Context context) throws NejiException {
    logger.info("Initializing context...");
    context.initialize();
    logger.info("Installing multi-threading support...");
    context.addMultiThreadingSupport(numThreads);

    ExecutorService executor;

    logger.info("Starting thread pool with support for {} threads...", numThreads);
    executor = Executors.newFixedThreadPool(numThreads);

    StopWatch timer = new StopWatch();
    timer.start();

    File inputFolder = new File(inputFolderPath);
    File[] files = inputFolder.listFiles(new FileUtil.Filter(new String[] { "txt" }));

    for (File file : files) {
        //            File a1File = new File(file.getAbsolutePath().replaceAll(".txt", ".ann"));
        // replace() rather than replaceAll(): replaceAll treats ".txt" as a regex
        File a1File = new File(file.getAbsolutePath().replace(".txt", ".a1"));
        Processor processor = getDocumentProcessor(file, a1File, context);

        // Process entry
        executor.execute(processor);
    }

    executor.shutdown();
    try {
        executor.awaitTermination(Long.MAX_VALUE, TimeUnit.DAYS);
    } catch (InterruptedException e) {
        throw new RuntimeException(e);
    }
    logger.info("Stopped thread pool.");

    logger.info("Terminating context...");
    context.terminate();

    timer.stop();
    logger.info("Processed {} files in {}", processedCorpora.size(), timer.toString());
}

From source file:pt.ua.tm.neji.parsing.TestDynamicParsing.java

public void test() throws IOException, NejiException {
    Constants.verbose = true;
    StopWatch timer = new StopWatch();

    // create corpus
    Corpus corpus = new Corpus();
    corpus.setText(Variables.str1.gdep.text);

    // readies the parser
    Variables.parseWithDepParser(ParserLevel.TOKENIZATION, corpus, Variables.str1.gdep.text);

    // test that only tokenization was performed; no dependency features, lemmas, POS tags or chunks should exist
    timer.start();
    List<Sentence> sentences1 = Variables.parseWithDepParser(ParserLevel.TOKENIZATION, corpus,
            Variables.str1.gdep.text);
    timer.stop();
    logger.info("{}", sentences1.get(0).toExportFormat());
    logger.info("Tokenization took {}", timer.toString());
    timer.reset();

    // test that only lemmatization was performed; no dependency features, POS tags or chunks should exist
    timer.start();
    List<Sentence> sentences2 = Variables.parseWithDepParser(ParserLevel.LEMMATIZATION, corpus,
            Variables.str1.gdep.text);
    timer.stop();
    logger.info("{}", sentences2.get(0).toExportFormat());
    logger.info("Lemmatization took {}", timer.toString());
    timer.reset();

    // test that only POS tagging was performed; no dependency features or chunks should exist
    timer.start();
    List<Sentence> sentences3 = Variables.parseWithDepParser(ParserLevel.POS, corpus, Variables.str1.gdep.text);
    timer.stop();
    logger.info("{}", sentences3.get(0).toExportFormat());
    logger.info("POS took {}", timer.toString());
    timer.reset();

    // test that only chunking was performed; no dependency features should exist
    timer.start();
    List<Sentence> sentences4 = Variables.parseWithDepParser(ParserLevel.CHUNKING, corpus,
            Variables.str1.gdep.text);
    timer.stop();
    logger.info("{}", sentences4.get(0).toExportFormat());
    logger.info("Chunking took {}", timer.toString());
    timer.reset();

    // test that dependency parsing was performed
    timer.start();
    List<Sentence> sentences5 = Variables.parseWithDepParser(ParserLevel.DEPENDENCY, corpus,
            Variables.str1.gdep.text);
    timer.stop();
    logger.info("{}", sentences5.get(0).toExportFormat());
    logger.info("Dependency took {}", timer.toString());
    timer.reset();

}
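
Note the timer.reset() calls between measurements: a stopped commons-lang StopWatch must be reset before start() may be called again; otherwise start() throws an IllegalStateException.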

From source file:pt.ua.tm.neji.train.batch.TrainBatchExecutor.java

@Override
public void run(Class<? extends Processor> processorCls, Context context, Object... args) throws NejiException {

    StopWatch timer = new StopWatch();

    logger.info("Initializing context...");
    context.initialize();

    timer.start();

    if (((TrainContext) context).getPhase() == 1) { // Phase 1            

        // If input format requires annotations
        if (context.getConfiguration().getInputFormat().equals(InputFormat.BC2)) { // File + Annotations formats
            processFiles(inputSentencesFilePath, inputAnnotationsFilePath, (TrainContext) context, processorCls,
                    args);
        } else if (context.getConfiguration().getInputFormat().equals(InputFormat.A1)) { // Folder format
            processMultipleFiles(inputSentencesFilePath, numThreads, (TrainContext) context, processorCls,
                    args);
        } else { // File formats 
            processFiles(inputSentencesFilePath, (TrainContext) context, processorCls, args);
        }
    } else { // Phase 2

        // In this case inputSentencesFilePath contains the path to the corpus
        processFiles2((TrainContext) context, processorCls, args);
    }

    logger.info("Terminating context...");
    context.terminate();

    timer.stop();
    logger.info("Processed files in {}", timer.toString());
}

From source file:pt.ua.tm.neji.web.batch.ServerBatchExecutor.java

@Override
public void run(Class<? extends Processor> processorCls, Context context, Object... args) throws NejiException {

    // Add false positives
    if (service.getFalsePositives() != null) {
        byte[] fpByte = service.getFalsePositives().getBytes();
        context.getConfiguration().setFalsePositives(fpByte);
    } else {
        context.getConfiguration().setFalsePositives(null);
    }

    // Add semantic group normalization (only when exporting; if format
    // is null, this is an annotate request)
    if ((format != null) && !service.getGroupsNormalization().isEmpty()) {
        byte[] gnByte = service.getGroupsNormalizationByteArray();
        context.getConfiguration().setSemanticGroupsNormalization(gnByte);
    } else {
        context.getConfiguration().setSemanticGroupsNormalization(null);
    }

    // Distribution of output streams to the pipeline
    Map<OutputFormat, OutputStream> formatToStreamMap = new HashMap<>();
    List<OutputStream> outputStreams = new ArrayList<>();

    for (OutputFormat f : context.getConfiguration().getOutputFormats()) {
        OutputStream o = new ByteArrayOutputStream();
        formatToStreamMap.put(f, o);
        outputStreams.add(o);
    }

    Processor processor;
    Pipeline p = new DefaultPipeline(corpus);
    try {
        if (args != null && args.length != 0) {
            processor = newProcessor(processorCls, context, inputStream, outputStreams, service, p, groups,
                    filterGroups, args);
        } else {
            processor = newProcessor(processorCls, context, inputStream, outputStreams, service, p, groups,
                    filterGroups);
        }
    } catch (NejiException ex) {
        String m = "There was a problem creating the server processor";
        logger.error(m, ex);
        throw new RuntimeException(m, ex);
    }

    logger.info("");
    logger.info("Started processing a new document...");
    StopWatch timer = new StopWatch();
    timer.start();

    executor.execute(processor);

    try {
        synchronized (processor) {
            processor.wait();
        }
    } catch (InterruptedException ex) {
        throw new RuntimeException("There was a problem running the annotation service.", ex);
    }

    timer.stop();
    logger.info("Processed document in {}", timer.toString());

    if (format != null) {
        OutputStream output = formatToStreamMap.get(format);
        annotatedText = output.toString();
    }
}
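
Here the processor runs asynchronously on the executor while the calling thread blocks on processor.wait(); the processor is presumably expected to notify itself when the pipeline finishes, so the timer measures the full processing of the document.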

From source file:terrastore.util.io.SerializersComparisonTest.java

@Test
public void testJavaSerializer() {
    Value value = new Value(VALUE.getBytes());
    JavaSerializer<Value> serializer = new JavaSerializer<Value>();
    StopWatch sw = new StopWatch();
    System.out.println("Warm-up...");
    for (int i = 0; i < 1000; i++) {
        serializer.serialize(value);
    }
    //
    System.out.println("Measuring...");
    sw.start();
    for (int i = 0; i < 100000; i++) {
        serializer.serialize(value);
    }
    sw.stop();
    System.out.println("Elapsed for testJavaSerializer: " + sw.toString());
}

From source file:terrastore.util.io.SerializersComparisonTest.java

@Test
public void testMsgPackSerializer() {
    Value value = new Value(VALUE.getBytes());
    MsgPackSerializer<Value> serializer = new MsgPackSerializer<Value>(false);
    StopWatch sw = new StopWatch();
    System.out.println("Warm-up...");
    for (int i = 0; i < 1000; i++) {
        serializer.serialize(value);
    }
    //
    System.out.println("Measuring...");
    sw.start();
    for (int i = 0; i < 100000; i++) {
        serializer.serialize(value);
    }
    sw.stop();
    System.out.println("Elapsed for testMsgPackSerializer: " + sw.toString());
}

From source file:ubic.pubmedgate.mallet.FoldRunner.java

public void run() {
    train(training);
    StopWatch watch = new StopWatch();
    watch.start();
    test(testing);
    watch.stop();
    log.info("Testing classifcation took " + watch.toString() + " for " + testing.size());
    done = true;
    if (notifyWhenDone != null) {
        synchronized (notifyWhenDone) {
            notifyWhenDone.notifyAll();
        }
    }
}

From source file:ubic.pubmedgate.resolve.CreateSupplementRDF.java

/**
 * @param args
 */
public static void main(String[] args) throws Exception {
    // load in resolutions
    boolean addSpecies = true;
    boolean reason = true;
    boolean useUnseenCorp = true;

    String file = "resolve.Lexicon.resolution.RDF.allComp";

    //String file = "resolve.Lexicon.RDF.allComp";
    file = Config.config.getString(file);
    Model modelLoad = ModelFactory.createDefaultModel();
    FileInputStream fis = new FileInputStream(file);
    modelLoad.read(fis, null);
    fis.close();

    if (addSpecies) {
        log.info("Adding species info");
        String storeLoc = Config.config.getString("whitetext.datastore.location");
        MakeLexiconRDFModel lexiModel = new MakeLexiconRDFModel();
        lexiModel.setModel(modelLoad);
        lexiModel.addSpeciesToModel(storeLoc, useUnseenCorp);
    }

    EvaluationRDFModel evaluationModel = new EvaluationRDFModel(modelLoad, reason);

    evaluationModel.loadManualMatches();
    evaluationModel.loadManualEvaluations();
    evaluationModel.loadAutomaticEvaluations();
    //
    //
    log.info("Writing out");
    StopWatch stopwatch = new StopWatch();
    stopwatch.start();
    evaluationModel.writeOut(Config.config.getString("resolve.supplement.RDF"));
    log.info(stopwatch.toString());

    evaluationModel.getStats();

}

From source file:ubic.pubmedgate.treetagger.TreeTaggerRunner.java

/**
 * @param args
 */
public static void main(String[] args) throws Exception {
    String annotationSet = "TreeTagger";
    // set GATE to use GATE tokens instead of TreeTagger's tokenization;
    // untested
    boolean GATETokens = true;
    if (GATETokens) {
        annotationSet += "GATETokens";
    }

    StopWatch watch = new org.apache.commons.lang.time.StopWatch();
    watch.start();
    GateInterface p2g = new GateInterface();

    // Corpus corp = p2g.getCorp();
    // Corpus corp = p2g.getTrainingCorp();
    // Corpus corp = p2g.getNoAbbrevCorp();
    Corpus corp = p2g.getUnseenCorp();
    TreeTaggerRunner runner = new TreeTaggerRunner(annotationSet, GATETokens);

    // code to test a single document
    // Document test = ( Document ) p2g.getUnseenCorp().get( 17761 );
    // runner.runTreeTagger( new ConnectionsDocument( test ) );
    // System.exit( 1 );

    GateReseter reset = new GateReseter(GateInterface.getDocuments(corp), annotationSet);
    reset.reset();
    log.info("Done reseting");

    // document 128 had a problem
    // runner.runTreeTagger( new ConnectionsDocument( ( Document )
    // p2g.getTrainingCorp().get( 128 ) ) );
    // System.exit( 1 );

    System.out.println("Time:" + watch.toString());
    int i = 0;
    int errors = 0;
    for (ConnectionsDocument doc : GateInterface.getDocuments(corp)) {
        log.info(doc.getName());
        try {
            runner.runTreeTagger(doc);
            doc.sync();
        } catch (Exception e) {
            e.printStackTrace();
            errors++;
        }
        log.info("i:" + i++ + " Time:" + watch.toString() + " errors:" + errors);
    }

}