Example usage for org.apache.commons.lang3.time StopWatch resume

Introduction

This page lists example usages of org.apache.commons.lang3.time.StopWatch.resume() collected from open-source projects.

Prototype

public void resume() 

Document

Resume the stopwatch after a suspend. Time elapsed between the suspend and resume calls is excluded from the total; resume() may only be called on a suspended stopwatch, otherwise an IllegalStateException is thrown.
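Before the full examples below, a minimal sketch of the suspend/resume lifecycle (the class name and sleep durations are illustrative, not taken from any of the sources):

import org.apache.commons.lang3.time.StopWatch;

public class StopWatchResumeSketch {
    public static void main(String[] args) throws InterruptedException {
        StopWatch watch = new StopWatch();
        watch.start();
        Thread.sleep(50);   // measured
        watch.suspend();    // elapsed time is frozen here
        Thread.sleep(50);   // not measured
        watch.resume();     // watch must be suspended, else IllegalStateException
        Thread.sleep(50);   // measured
        watch.stop();
        System.out.println(watch.getTime() + " ms"); // roughly 100 ms
    }
}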

Usage

From source file:org.alfresco.bm.event.AbstractEventProcessor.java
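In this benchmark framework, resumeTimer() restarts a previously suspended event timer and returns the elapsed time so far.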

/**
 * Continue timing the event processing
 *
 * @throws IllegalStateException        if the timer is not suspended
 */
protected long resumeTimer() {
    StopWatch stopWatch = getStopWatch();
    stopWatch.resume();
    return stopWatch.getTime();
}

From source file:org.trnltk.experiment.bruteforce.BruteForceExperiments.java
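This test keeps the stopwatch suspended except while parser.parse(...) runs, so the reported totals and averages reflect parsing time only.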

@Test
public void shouldParseTbmmJournal_b0241h() throws IOException {
    final File tokenizedFile = new File("core/src/test/resources/tokenizer/tbmm_b0241h_tokenized.txt");
    final List<String> lines = Files.readLines(tokenizedFile, Charsets.UTF_8);
    final LinkedList<String> words = new LinkedList<String>();
    for (String line : lines) {
        words.addAll(Lists.newArrayList(Splitter.on(" ").trimResults().omitEmptyStrings().split(line)));
    }

    final StopWatch stopWatch = new StopWatch();
    int parseResultCount = 0;
    final int MAX_WORD_LENGTH = 100;

    int[] wordCountsByLength = new int[MAX_WORD_LENGTH];
    int[] parseResultCountTotalsByTokenLength = new int[MAX_WORD_LENGTH];

    // start the watch, then immediately suspend it: only the parse(...) calls below are timed
    stopWatch.start();
    stopWatch.suspend();

    for (String word : words) {
        stopWatch.resume();
        final LinkedList<MorphemeContainer> morphemeContainers = parser.parse(new TurkishSequence(word));
        stopWatch.suspend();
        if (morphemeContainers.isEmpty())
            System.out.println("Word is not parsable " + word);
        parseResultCount += morphemeContainers.size();
        parseResultCountTotalsByTokenLength[word.length()] += morphemeContainers.size();
        wordCountsByLength[word.length()]++;
    }

    stopWatch.stop();

    final double[] parseResultCountAvgsByLength = new double[MAX_WORD_LENGTH];
    for (int i = 0; i < parseResultCountTotalsByTokenLength.length; i++) {
        int totalParseResultCount = parseResultCountTotalsByTokenLength[i];
        final int wordCount = wordCountsByLength[i];
        parseResultCountAvgsByLength[i] = Double.valueOf(totalParseResultCount) / Double.valueOf(wordCount);
    }

    System.out.println("Total time :" + stopWatch.toString());
    System.out.println("Nr of tokens : " + words.size());
    System.out.println("Nr of parse results : " + parseResultCount);
    System.out.println("Avg time : " + (stopWatch.getTime() * 1.0d) / (words.size() * 1.0d) + " ms");
    System.out.println("Avg parse result count : " + (parseResultCount * 1.0) / (words.size() * 1.0));
    System.out.println("Word counts by token length " + "\n\t" + Arrays.toString(wordCountsByLength));
    System.out.println("Parse result count totals by token length " + "\n\t"
            + Arrays.toString(parseResultCountTotalsByTokenLength));
    System.out.println("Parse result count avgs by token length " + "\n\t"
            + Arrays.toString(parseResultCountAvgsByLength));
}

From source file:org.trnltk.tokenizer.TextTokenizerCorpusTest.java
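Here the stopwatch is resumed only around tokenizer.tokenize(...), excluding file I/O, progress reporting, and result writing from the measured time.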

protected static void createTokenizedFile(TextTokenizer tokenizer, File sentencesFile, File tokenizedFile,
        File errorFile, boolean silent, TokenizationCommandCallback tokenizationCommandCallback)
        throws IOException {
    int N = 10000;

    final StopWatch tokenizationStopWatch = new StopWatch();
    // start the watch, then immediately suspend it: it runs only around tokenizer.tokenize(...)
    tokenizationStopWatch.start();
    tokenizationStopWatch.suspend();

    //        final BufferedReader lineReader = Files.newReader(sentencesFile, Charsets.UTF_8);       // don't read the file into memory
    //        final int lineCount = lineCount(sentencesFile);     // I want to know this in advance to make an ETA statement

    final List<String> sentences = Files.readLines(sentencesFile, Charsets.UTF_8);
    final int lineCount = sentences.size();

    if (!silent)
        System.out.println("Number of lines in the file : " + lineCount);

    final BufferedWriter tokensWriter = Files.newWriter(tokenizedFile, Charsets.UTF_8);
    final PrintWriter errorWriter = errorFile != null
            ? new PrintWriter(Files.newWriter(errorFile, Charsets.UTF_8))
            : new PrintWriter(System.out);

    int numberOfLinesInError = 0;
    int tokenCount = 0;
    try {
        //            for (Iterator<String> iterator = sentences.iterator(); iterator.hasNext(); ) {
        //              String sentence = iterator.next();
        int index;
        for (index = 0; index < sentences.size(); index++) {
            final String sentence = sentences.get(index);
            if (!silent && index % 10000 == 0) {
                System.out.println("Tokenizing line #" + index);
                final long totalTimeSoFar = tokenizationStopWatch.getTime();
                final double avgTimeForALine = Long.valueOf(totalTimeSoFar).doubleValue() / index;
                final double remainingTimeEstimate = avgTimeForALine * (lineCount - index);
                System.out.println("For file --> ETA : "
                        + DurationFormatUtils.formatDurationHMS((long) remainingTimeEstimate) + " So far : "
                        + tokenizationStopWatch.toString());
            }
            if (tokenizationCommandCallback != null && index % N == 0) {
                tokenizationCommandCallback.reportProgress(N);
            }
            tokenizationStopWatch.resume();
            final Iterable<Token> tokens;
            try {
                tokens = tokenizer.tokenize(sentence);
            } catch (Exception e) {
                // skip the line
                numberOfLinesInError++;
                e.printStackTrace(errorWriter);
                errorWriter.println();
                tokenizationStopWatch.suspend();
                continue;
            }
            tokenizationStopWatch.suspend();
            final Iterator<Token> tokensIterator = tokens.iterator();
            while (tokensIterator.hasNext()) {
                final Token token = tokensIterator.next();
                tokensWriter.write(token.getSurface());
                tokenCount++;
                if (tokensIterator.hasNext())
                    tokensWriter.write(" ");
            }
            tokensWriter.write("\n");
        }
        if (tokenizationCommandCallback != null) {
            //report the lines since last report
            tokenizationCommandCallback.reportProgress(index % N);
        }

    } finally {
        tokensWriter.close();
        errorWriter.close();
    }

    tokenizationStopWatch.stop();

    if (!silent) {
        System.out.println("Tokenized " + lineCount + " lines.");
        System.out.println("Found " + tokenCount + " tokens.");
        System.out.println("Avg time for tokenizing a line : "
                + Double.valueOf(tokenizationStopWatch.getTime()) / Double.valueOf(lineCount) + " ms");
        System.out.println("\tProcessed : "
                + Double.valueOf(lineCount) / Double.valueOf(tokenizationStopWatch.getTime()) * 1000d
                + " lines in a second");
        System.out.println("Avg time for generating a token : "
                + Double.valueOf(tokenizationStopWatch.getTime()) / Double.valueOf(tokenCount) + " ms");
        System.out.println("\tProcessed : "
                + Double.valueOf(tokenCount) / Double.valueOf(tokenizationStopWatch.getTime()) * 1000d
                + " tokens in a second");

        final TextTokenizer.TextTokenizerStats stats = tokenizer.getStats();

        if (stats != null) {
            final LinkedHashMap<Pair<TextBlockTypeGroup, TextBlockTypeGroup>, Integer> successMap = stats
                    .buildSortedSuccessMap();
            System.out.println("Used " + successMap.size() + " distinct rules");

            final LinkedHashMap<Pair<TextBlockTypeGroup, TextBlockTypeGroup>, Set<MissingTokenizationRuleException>> failMap = stats
                    .buildSortedFailMap();
            System.out.println("Couldn't find a rule for " + failMap.size() + " distinct specs");
            System.out.println("Printing missing rules with occurrence count:");

            int countOfMissing = 0;
            for (Map.Entry<Pair<TextBlockTypeGroup, TextBlockTypeGroup>, Set<MissingTokenizationRuleException>> entry : failMap
                    .entrySet()) {
                final Pair<TextBlockTypeGroup, TextBlockTypeGroup> theCase = entry.getKey();
                final Set<MissingTokenizationRuleException> exceptionsForCase = entry.getValue();
                countOfMissing += exceptionsForCase.size();
                System.out.println("\t" + theCase + "\t" + exceptionsForCase.size());
                int i = 0;
                for (MissingTokenizationRuleException ex : exceptionsForCase) {
                    final String message = ex.getMessage().replace("\t", "\t\t\t");
                    final String contextStr = "..." + ex.getContextBlockGroup().getText() + "...";

                    System.out.println("\t\t" + contextStr + "\n\t\t" + message);
                    if (i == 2) //print only 3 messages for each case
                        break;
                    i++;
                }
            }

            System.out.println("Couldn't find a rule in a total of " + countOfMissing + " times");
        }
    }

    if (tokenizationCommandCallback != null) {
        tokenizationCommandCallback.reportFileFinished(tokenCount, numberOfLinesInError);
    }
}

From source file:won.protocol.model.URISerializeVsToStringTest.java
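This micro-benchmark resumes the watch only around the operations being compared: URI parsing in the first loop, and Java serialization/deserialization of URIs in the second.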

public static void main(String[] args) throws IOException, ClassNotFoundException {
    StopWatch stopWatch = new StopWatch();

    String myuri = "https://192.168.124.49:8443/won/resource/need/561548487155823600";

    // start the watch, then immediately suspend it: resume()/suspend() bracket only the code under measurement
    stopWatch.start();
    stopWatch.suspend();
    for (int i = 0; i < 1000 * 1000; i++) {
        String toParse = myuri + RandomStringUtils.randomAlphanumeric(10);
        stopWatch.resume();
        URI theURI = URI.create(toParse);
        String anotherString = theURI.toString();
        stopWatch.suspend();
    }
    System.out.println("test1 took " + stopWatch.getTime() + " millis");

    stopWatch.reset();
    stopWatch.start();
    stopWatch.suspend();
    for (int i = 0; i < 1000 * 1000; i++) {
        URI theURI = URI.create(myuri + RandomStringUtils.randomAlphanumeric(10));
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        ObjectOutputStream oos = new ObjectOutputStream(baos);
        stopWatch.resume();
        oos.writeObject(theURI);
        byte[] data = baos.toByteArray();
        stopWatch.suspend();
        ByteArrayInputStream bais = new ByteArrayInputStream(data);
        ObjectInputStream ois = new ObjectInputStream(bais);
        stopWatch.resume();
        URI theSameURI = (URI) ois.readObject();
        stopWatch.suspend();
    }
    System.out.println("test2 took " + stopWatch.getTime() + " millis");
}