Example usage for com.google.common.base.Stopwatch.toString()

Introduction

This page lists example usages of com.google.common.base.Stopwatch.toString(), collected from open-source projects.

Prototype

@GwtIncompatible("String.format()")
@Override
public String toString() 

Document

Returns a string representation of the current elapsed time.
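
Before the project examples, here is a minimal, self-contained sketch of the call; the class name and the simulated delay are illustrative assumptions, not taken from any of the listed projects.

import com.google.common.base.Stopwatch;

public class StopwatchToStringDemo {
    public static void main(String[] args) throws InterruptedException {
        // Start measuring elapsed time immediately.
        Stopwatch stopwatch = Stopwatch.createStarted();

        // Simulate some work whose duration we want to report.
        Thread.sleep(120);

        stopwatch.stop();

        // toString() picks a human-readable unit automatically, producing
        // something like "120.3 ms"; the exact digits depend on the run.
        System.out.println("Work finished in " + stopwatch.toString());
    }
}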

Usage

From source file:es.usc.citius.composit.core.composition.optimization.BackwardMinimizationOptimizer.java

@Override
public ServiceMatchNetwork<E, T> optimize(ServiceMatchNetwork<E, T> network) {
    Stopwatch globalWatch = Stopwatch.createStarted();
    Stopwatch localWatch = Stopwatch.createUnstarted();
    Set<E> newInputs = new HashSet<E>();
    List<Set<Operation<E>>> optimized = new ArrayList<Set<Operation<E>>>(network.numberOfLevels());
    log.debug("Starting service-backward optimization...");
    localWatch.start();
    for (int i = network.numberOfLevels() - 1; i >= 0; i--) {
        Set<Operation<E>> current = network.getOperationsAtLevel(i);
        log.debug(" > Analyzing network level {} : {}", i, current);
        Set<Operation<E>> optimizedSet = new HashSet<Operation<E>>();
        Set<E> futureInputs = new HashSet<E>();
        // Find all services that produce at least one of the required inputs. If newInputs is
        // empty, select all of them.
        for (Operation<E> op : current) {
            log.debug("\t\tChecking operation {}", op.getID());
            if (newInputs.isEmpty()) {
                futureInputs.addAll(op.getSignature().getInputs());
                optimizedSet.add(op);
                log.debug("\t\t+ {} selected as a mandatory operation", op.getID());
            } else {
                boolean used = false;
                next: for (E output : op.getSignature().getOutputs()) {
                    for (E input : newInputs) {
                        used = network.match(output, input) != null;
                        if (used) {
                            log.debug(
                                    "\t\t+ Operation {} marked as useful (match detected between output {} and input {})",
                                    op.getID(), output, input);
                            optimizedSet.add(op);
                            // Update new inputs
                            futureInputs.addAll(op.getSignature().getInputs());
                            break next;
                        }
                    }
                }
                if (!used)
                    log.debug("\t\t- Operation {} marked as useless", op.getID());
            }
            //log.debug(" Inputs for the next iteration: {}", futureInputs);
        }
        newInputs.addAll(futureInputs);
        optimized.add(optimizedSet);
    }
    Collections.reverse(optimized);
    // Create a new match network
    localWatch.reset().start();
    ServiceMatchNetwork<E, T> optimizedNetwork = new DirectedAcyclicSMN<E, T>(
            new HashLeveledServices<E>(optimized), network);
    localWatch.stop();
    log.debug(" > Optimized match network created in {}", localWatch.toString());
    log.debug("Backward Optimization done in {}. Size before/after {}/{}", globalWatch.stop().toString(),
            network.listOperations().size(), optimizedNetwork.listOperations().size());
    // Create a new optimized service match network
    return optimizedNetwork;
}

From source file:org.daisy.maven.xspec.XSpecRunner.java

private TestResults runSingle(String testName, File testFile, File reportDir) {
    // Prepare the reporters
    File textReport = new File(reportDir, "OUT-" + testName + ".txt");
    PrintWriter writer = null;
    try {
        writer = new PrintWriter(Files.newWriter(textReport, Charsets.UTF_8));
    } catch (FileNotFoundException e) {
        throw new IllegalStateException(e.getMessage(), e);
    }
    SaxonReporter saxonReporter = new SaxonReporter(writer);

    XdmDestination xspecTestResult = new XdmDestination();
    XdmDestination xspecTestCompiled = new XdmDestination();
    SaxonApiException executionException = null;

    Stopwatch stopwatch = Stopwatch.createStarted();
    report("Running " + testName, writer);

    try {
        // Compile the XSpec test into an executable XSLT
        XsltTransformer xspecCompiler = xspecCompilerLoader.load();
        Source testAsSource = new StreamSource(testFile);
        xspecCompiler.setSource(testAsSource);
        xspecCompiler.setDestination(xspecTestCompiled);
        xspecCompiler.setErrorListener(saxonReporter);
        xspecCompiler.setMessageListener(saxonReporter);
        xspecCompiler.setURIResolver(new XSpecResolver(xspecCompiler.getURIResolver()));
        xspecCompiler.transform();

        // Create a new URI resolver if a mock catalog is present
        File catalog = new File(testFile.getParentFile(), "catalog.xml");
        URIResolver testResolver = defaultResolver;
        if (catalog.exists()) {
            CatalogManager catman = new CatalogManager();
            catman.setUseStaticCatalog(false);
            catman.setCatalogFiles(catalog.getPath());
            testResolver = new CatalogResolver(catman);
        }

        // Run the compiled XSpec test
        XsltCompiler xspecTestCompiler = processor.newXsltCompiler();
        xspecTestCompiler.setURIResolver(new XSpecResolver(testResolver));
        processor.getUnderlyingConfiguration().setErrorListener(saxonReporter);
        Source compiledTestAsSource = xspecTestCompiled.getXdmNode().asSource();
        XsltTransformer xspecTestRunner = xspecTestCompiler.compile(compiledTestAsSource).load();
        xspecTestRunner.setInitialTemplate(XSPEC_MAIN_TEMPLATE);
        xspecTestRunner.setDestination(xspecTestResult);
        xspecTestRunner.setErrorListener(saxonReporter);
        xspecTestRunner.setMessageListener(saxonReporter);
        xspecTestRunner.setURIResolver(new XSpecResolver(testResolver));
        xspecTestRunner.transform();

    } catch (SaxonApiException e) {
        report(e.getMessage(), writer);
        e.printStackTrace(writer);
        executionException = e;
    }

    stopwatch.stop();

    TestResults result = (executionException == null)
            ? XSpecResultBuilder.fromReport(testName, xspecTestResult.getXdmNode(), xpathCompiler,
                    stopwatch.toString())
            : XSpecResultBuilder.fromException(testName, executionException, stopwatch.toString());

    report(result.toString(), writer);

    writer.close();

    if (result.getErrors() == 0) {
        try {
            // Write XSpec report
            File xspecReport = new File(reportDir, "XSPEC-" + testName + ".xml");
            serializeToFile(xspecReport).serializeNode(xspecTestResult.getXdmNode());

            // Write HTML report
            File css = new File(reportDir, XSPEC_CSS_NAME);
            if (!css.exists()) {
                cssSupplier.copyTo(new FileOutputStream(css));
            }
            File htmlReport = new File(reportDir, "HTML-" + testName + ".html");
            XsltTransformer htmlFormatter = xspecHtmlFormatterLoader.load();
            htmlFormatter.setSource(xspecTestResult.getXdmNode().asSource());
            htmlFormatter.setParameter(XSPEC_CSS_URI_PARAM, XSPEC_CSS_URI);
            htmlFormatter.setDestination(serializeToFile(htmlReport));
            htmlFormatter.setMessageListener(SaxonSinkReporter.INSTANCE);
            htmlFormatter.transform();

            // Write Surefire report
            File surefireReport = new File(reportDir, "TEST-" + testName + ".xml");
            XsltTransformer junitFormatter = xspecJUnitFormatterLoader.load();
            junitFormatter.setSource(xspecTestResult.getXdmNode().asSource());
            junitFormatter.setDestination(serializeToFile(surefireReport));
            junitFormatter.setParameter(JUNIT_NAME_PARAM, new XdmAtomicValue(testName));
            junitFormatter.setParameter(JUNIT_TIME_PARAM,
                    new XdmAtomicValue(stopwatch.elapsed(TimeUnit.MILLISECONDS) / 1000d));
            junitFormatter.transform();
        } catch (SaxonApiException e) {
            throw new RuntimeException(e);
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    }

    return result;
}

From source file:com.edduarte.vokter.document.DocumentBuilder.java

/**
 * Indexes the documents specified in the factory method and adds the index
 * files into the specified folder.
 * <p>
 * This method will perform all tasks associated with reading a corpus,
 * processing and indexing it, writing the results to disk persistence and
 * building cached systems that provide synchronous access to documents and
 * tokens.
 * <p>
 * The most recently accessed tokens and documents are kept in memory for 20
 * seconds before being destroyed. If a token and a document are not in cache,
 * the relevant data is read and parsed from the local files.
 *
 * @return the built index of the documents specified in the factory method
 */
public Document build(DB occurrencesDB, ParserPool parserPool) {
    Stopwatch sw = Stopwatch.createStarted();

    // step 1) Perform a lazy loading of the document, by obtaining its url,
    // content stream and content type.
    DocumentInput input = documentLazySupplier.get();

    // step 2) Checks if the input document is supported by the server
    boolean isSupported = OSGiManager.getCompatibleReader(input.getContentType()) != null;
    if (!isSupported) {
        logger.info("Ignored processing document '{}': No compatible readers available for content-type '{}'.",
                input.getUrl(), input.getContentType());
        return null;
    }

    // step 3) Takes a parser from the parser-pool.
    Parser parser;
    try {
        parser = parserPool.take();
    } catch (InterruptedException ex) {
        logger.error(ex.getMessage(), ex);
        return null;
    }

    // step 4) Build a processing instruction to be executed.
    //         A pipeline instantiates a new object for each of the
    //         required modules, improving performance of parallel jobs.
    DocumentPipeline pipeline = new DocumentPipeline(

            // the language detection model
            langDetector,

            // general structure that holds the created occurrences
            occurrencesDB,

            // the input document info, including its path and InputStream
            input,

            // parser that will be used for document parsing and occurrence
            // detection
            parser,

            // flag that sets that stopwords will be filtered during
            // tokenization
            isStoppingEnabled,

            // flag that sets that every found occurrence during tokenization will
            // be stemmed
            isStemmingEnabled,

            // flag that forces every found token to be lower case, matching,
            // for example, the words 'be' and 'Be' as the same token
            ignoreCase);

    // step 5) Process the document asynchronously.
    Document document;
    try {
        document = pipeline.call();
    } catch (Exception ex) {
        logger.error(ex.getMessage(), ex);
        return null;
    }

    // step 6) Place the parser back in the parser-pool.
    try {
        parserPool.place(parser);
    } catch (InterruptedException ex) {
        logger.error(ex.getMessage(), ex);
        return null;
    }

    sw.stop();
    logger.info("Completed processing document '{}' in {}.", document.getUrl(), sw.toString());

    return document;
}

From source file:com.edduarte.argus.document.DocumentBuilder.java

/**
 * Indexes the documents specified in the factory method and adds the index
 * files into the specified folder.
 * <p>
 * This method will perform all tasks associated with reading a corpus,
 * processing and indexing it, writing the results to disk persistence and
 * building cached systems that provide synchronous access to documents and
 * tokens.
 * <p>
 * The most recently accessed tokens and documents are kept in memory for 20
 * seconds before being destroyed. If a token and a document are not in cache,
 * the relevant data is read and parsed from the local files.
 *
 * @return the built index of the documents specified in the factory method
 */
public Document build(DB occurrencesDB, ParserPool parserPool) {
    Stopwatch sw = Stopwatch.createStarted();

    // step 1) Perform a lazy loading of the document, by obtaining its url,
    // content stream and content type.
    DocumentInput input = documentLazySupplier.get();

    // step 2) Checks if the input document is supported by the server
    boolean isSupported = PluginLoader.getCompatibleReader(input.getContentType()) != null;
    if (!isSupported) {
        logger.info("Ignored processing document '{}': No compatible readers available for content-type '{}'.",
                input.getUrl(), input.getContentType());
        return null;
    }

    // step 3) Takes a parser from the parser-pool.
    Parser parser;
    try {
        parser = parserPool.take();
    } catch (InterruptedException ex) {
        logger.error(ex.getMessage(), ex);
        return null;
    }

    // step 4) Build a processing instruction to be executed.
    //         A pipeline instantiates a new object for each of the
    //         required modules, improving performance of parallel jobs.
    DocumentPipeline pipeline = new DocumentPipeline(

            // the language detection model
            langDetector,

            // general structure that holds the created occurrences
            occurrencesDB,

            // the input document info, including its path and InputStream
            input,

            // parser that will be used for document parsing and occurrence
            // detection
            parser,

            // flag that sets that stopwords will be filtered during
            // tokenization
            isStoppingEnabled,

            // flag that sets that every found occurrence during tokenization will
            // be stemmed
            isStemmingEnabled,

            // flag that forces every found token to be lower case, matching,
            // for example, the words 'be' and 'Be' as the same token
            ignoreCase);

    // step 5) Process the document asynchronously.
    Document document;
    try {
        document = pipeline.call();
    } catch (Exception ex) {
        logger.error(ex.getMessage(), ex);
        return null;
    }

    // step 6) Place the parser back in the parser-pool.
    try {
        parserPool.place(parser);
    } catch (InterruptedException ex) {
        logger.error(ex.getMessage(), ex);
        return null;
    }

    sw.stop();
    logger.info("Completed processing document '{}' in {}.", document.getUrl(), sw.toString());

    return document;
}

From source file:com.edduarte.argus.diff.DifferenceDetector.java

@Override
public List<Difference> call() {
    Stopwatch sw = Stopwatch.createStarted();

    DiffMatchPatch dmp = new DiffMatchPatch();

    String original = oldSnapshot.getProcessedContent();
    String revision = newSnapshot.getProcessedContent();

    LinkedList<DiffMatchPatch.Diff> diffs = dmp.diff_main(original, revision);
    dmp.diff_cleanupSemantic(diffs);

    Parser parser;
    try {
        parser = parserPool.take();
    } catch (InterruptedException ex) {
        logger.error(ex.getMessage(), ex);
        return null;
    }

    int insertedCountOffset = 0, deletedCountOffset = 0;
    List<Difference> retrievedDiffs = new ArrayList<>();
    for (DiffMatchPatch.Diff diff : diffs) {
        String diffText = diff.text;

        List<Parser.Result> results = parser.parse(new MutableString(diffText));
        for (Parser.Result result : results) {
            String snippet;
            String occurrenceText = result.text.toString();
            switch (diff.action) {
            case inserted: {
                int wordNum = insertedCountOffset++;
                snippet = getSnippet(newSnapshot, occurrenceText, wordNum);
                break;
            }
            case deleted: {
                int wordNum = deletedCountOffset++;
                snippet = getSnippet(oldSnapshot, occurrenceText, wordNum);
                break;
            }
            default: {
                insertedCountOffset++;
                deletedCountOffset++;
                continue;
            }
            }

            retrievedDiffs.add(new Difference(diff.action, result.text.toString(), snippet));
        }
        results.clear();
        results = null;
    }

    try {
        parserPool.place(parser);
    } catch (InterruptedException ex) {
        logger.error(ex.getMessage(), ex);
        return null;
    }

    //        ListIterator<MatchedDiff> it = retrievedDiffs.listIterator();
    //        int i = 1;
    //        while (it.hasNext() && i < retrievedDiffs.size()) {
    //            MatchedDiff d1 = it.next();
    //            MatchedDiff d2 = retrievedDiffs.get(i);
    //
    //            if (d1.status == d2.status &&
    //                    d1.keyword.equals(d2.keyword) &&
    //                    d1.endIndex + SNIPPET_INDEX_OFFSET >= d2.startIndex - SNIPPET_INDEX_OFFSET) {
    ////                d2.startIndex = d1.startIndex;
    //                it.remove();
    //
    //            } else {
    //                i++;
    //            }
    //        }

    sw.stop();
    logger.info("Completed difference detection for document '{}' in {}", newSnapshot.getUrl(), sw.toString());
    return retrievedDiffs;
}

From source file:nextmethod.web.razor.editor.internal.BackgroundThread.java

private void workerLoop() {
    final boolean isEditorTracing = Debug.isDebugArgPresent(DebugArgs.EditorTracing);
    final String fileNameOnly = Filesystem.getFileName(fileName);

    Stopwatch sw = null;
    if (isEditorTracing) {
        sw = Stopwatch.createUnstarted();
    }

    try {
        RazorEditorTrace.traceLine(RazorResources().traceBackgroundThreadStart(fileNameOnly));
        ensureOnThread();
        while (!shutdownToken.isCancellationRequested()) {
            // Grab the parcel of work to do
            final WorkParcel parcel = main.getParcel();
            if (!parcel.getChanges().isEmpty()) {
                RazorEditorTrace.traceLine(RazorResources().traceChangesArrived(fileNameOnly,
                        String.valueOf(parcel.getChanges().size())));
                try {
                    DocumentParseCompleteEventArgs args = null;
                    try (CancellationTokenSource linkedCancel = CancellationTokenSource
                            .createLinkedTokenSource(shutdownToken, parcel.getCancelToken())) {
                        if (parcel != null && !linkedCancel.isCancellationRequested()) {
                            // Collect ALL changes
                            if (isEditorTracing && previouslyDiscarded != null
                                    && !previouslyDiscarded.isEmpty()) {
                                RazorEditorTrace.traceLine(RazorResources().traceCollectedDiscardedChanges(
                                        fileNameOnly, String.valueOf(parcel.getChanges().size())));
                            }
                            final Iterable<TextChange> allChanges = Iterables
                                    .concat(previouslyDiscarded != null ? previouslyDiscarded
                                            : Collections.<TextChange>emptyList(), parcel.getChanges());

                            final TextChange finalChange = Iterables.getLast(allChanges, null);
                            if (finalChange != null) {
                                if (isEditorTracing) {
                                    assert sw != null;
                                    sw.reset().start();
                                }

                                //noinspection ConstantConditions
                                final GeneratorResults results = parseChange(finalChange.getNewBuffer(),
                                        linkedCancel.getToken());

                                if (isEditorTracing) {
                                    assert sw != null;
                                    sw.stop();
                                }

                                RazorEditorTrace.traceLine(RazorResources().traceParseComplete(fileNameOnly,
                                        sw != null ? sw.toString() : "?"));

                                if (results != null && !linkedCancel.isCancellationRequested()) {
                                    // Clear discarded changes list
                                    previouslyDiscarded = Lists.newArrayList();
                                    // Take the current tree and check for differences
                                    if (isEditorTracing) {
                                        sw.reset().start();
                                    }
                                    final boolean treeStructureChanged = currentParseTree == null
                                            || BackgroundParser.treesAreDifferent(currentParseTree,
                                                    results.getDocument(), allChanges, parcel.getCancelToken());

                                    if (isEditorTracing) {
                                        sw.stop();
                                    }

                                    currentParseTree = results.getDocument();
                                    RazorEditorTrace.traceLine(RazorResources().traceTreesCompared(fileNameOnly,
                                            sw != null ? sw.toString() : "?",
                                            String.valueOf(treeStructureChanged)));

                                    // Build Arguments
                                    args = new DocumentParseCompleteEventArgs(treeStructureChanged, results,
                                            finalChange);
                                } else {
                                    // Parse completed but we were cancelled in the mean time. Add these to the discarded changes set
                                    RazorEditorTrace.traceLine(RazorResources().traceChangesDiscarded(
                                            fileNameOnly, String.valueOf(Iterables.size(allChanges))));
                                    previouslyDiscarded = Lists.newArrayList(allChanges);
                                }

                                if (Debug.isDebugArgPresent(DebugArgs.CheckTree) && args != null) {
                                    // Rewind the buffer and sanity check the line mappings
                                    finalChange.getNewBuffer().setPosition(0);
                                    final String buffer = TextExtensions.readToEnd(finalChange.getNewBuffer());
                                    final int lineCount = Iterables
                                            .size(Splitter.on(CharMatcher.anyOf("\r\n")).split(buffer));
                                    Debug.doAssert(!Iterables.any(
                                            args.getGeneratorResults().getDesignTimeLineMappingEntries(),
                                            input -> input != null
                                                    && input.getValue().getStartLine() > lineCount),
                                            "Found a design-time line mapping referring to a line outside the source file!");

                                    Debug.doAssert(
                                            !Iterables.any(args.getGeneratorResults().getDocument().flatten(),
                                                    input -> input != null
                                                            && input.getStart().getLineIndex() > lineCount),
                                            "Found a span with a line number outside the source file");
                                }
                            }
                        }
                    }
                    if (args != null) {
                        main.returnParcel(args);
                    }
                } catch (OperationCanceledException ignored) {

                }
            } else {
                RazorEditorTrace.traceLine(RazorResources().traceNoChangesArrived(fileName),
                        parcel.getChanges().size());
                Thread.yield();
            }
        }
    } catch (OperationCanceledException ignored) {
    } finally {
        RazorEditorTrace.traceLine(RazorResources().traceBackgroundThreadShutdown(fileNameOnly));
        // Clean up main thread resources
        main.close();
    }
}

From source file:com.edduarte.vokter.diff.DifferenceDetector.java

@Override
public List<Difference> call() {
    Stopwatch sw = Stopwatch.createStarted();

    DiffMatchPatch dmp = new DiffMatchPatch();

    String original = oldSnapshot.getProcessedContent();
    String revision = newSnapshot.getProcessedContent();

    // TODO: use LSH to determine a similarity index. If the distance is above
    // 0.4, the documents are different enough to warrant a more computationally
    // intensive task (analyzing token-by-token differences).

    LinkedList<DiffMatchPatch.Diff> diffs = dmp.diff_main(original, revision);
    dmp.diff_cleanupSemantic(diffs);

    Parser parser;
    try {
        parser = parserPool.take();
    } catch (InterruptedException ex) {
        logger.error(ex.getMessage(), ex);
        return null;
    }

    int insertedCountOffset = 0, deletedCountOffset = 0;
    List<Difference> retrievedDiffs = new ArrayList<>();
    for (DiffMatchPatch.Diff diff : diffs) {
        String diffText = diff.text;

        List<Parser.Result> results = parser.parse(new MutableString(diffText));
        for (Parser.Result result : results) {
            String snippet;
            String occurrenceText = result.text.toString();
            switch (diff.action) {
            case inserted: {
                int wordNum = insertedCountOffset++;
                snippet = getSnippet(newSnapshot, occurrenceText, wordNum);
                break;
            }
            case deleted: {
                int wordNum = deletedCountOffset++;
                snippet = getSnippet(oldSnapshot, occurrenceText, wordNum);
                break;
            }
            default: {
                insertedCountOffset++;
                deletedCountOffset++;
                continue;
            }
            }

            retrievedDiffs.add(new Difference(diff.action, result.text.toString(), snippet));
        }
        results.clear();
        results = null;
    }

    try {
        parserPool.place(parser);
    } catch (InterruptedException ex) {
        logger.error(ex.getMessage(), ex);
        return null;
    }

    //        ListIterator<MatchedDiff> it = retrievedDiffs.listIterator();
    //        int i = 1;
    //        while (it.hasNext() && i < retrievedDiffs.size()) {
    //            MatchedDiff d1 = it.next();
    //            MatchedDiff d2 = retrievedDiffs.get(i);
    //
    //            if (d1.status == d2.status &&
    //                    d1.keyword.equals(d2.keyword) &&
    //                    d1.endIndex + SNIPPET_INDEX_OFFSET >= d2.startIndex - SNIPPET_INDEX_OFFSET) {
    ////                d2.startIndex = d1.startIndex;
    //                it.remove();
    //
    //            } else {
    //                i++;
    //            }
    //        }

    sw.stop();
    logger.info("Completed difference detection for document '{}' in {}", newSnapshot.getUrl(), sw.toString());
    return retrievedDiffs;
}

From source file:cosmos.mapred.MediawikiQueries.java

public void run(int numIterations) throws Exception {
    final Random offsetR = new Random(), cardinalityR = new Random();

    int iters = 0;

    while (iters < numIterations) {
        Store id = Store.create(this.con,
                this.con.securityOperations().getUserAuthorizations(this.con.whoami()),
                IdentitySet.<Index>create());

        int offset = offsetR.nextInt(MAX_OFFSET);
        int numRecords = cardinalityR.nextInt(MAX_SIZE) + 1;

        BatchScanner bs = this.con.createBatchScanner("sortswiki", new Authorizations(), 4);

        bs.setRanges(Collections.singleton(new Range(Integer.toString(offset), Integer.toString(MAX_ROW))));

        Iterable<Entry<Key, Value>> inputIterable = Iterables.limit(bs, numRecords);

        this.sorts.register(id);

        System.out.println(Thread.currentThread().getName() + ": " + id.uuid() + " - Iteration " + iters);
        long recordsReturned = 0L;
        Function<Entry<Key, Value>, MultimapRecord> func = new Function<Entry<Key, Value>, MultimapRecord>() {
            @Override
            public MultimapRecord apply(Entry<Key, Value> input) {
                Page p;
                try {
                    p = Page.parseFrom(input.getValue().get());
                } catch (InvalidProtocolBufferException e) {
                    throw new RuntimeException(e);
                }
                return pagesToQueryResult(p);
            }
        };

        Map<Column, Long> counts = Maps.newHashMap();
        ArrayList<MultimapRecord> tformSource = Lists.newArrayListWithCapacity(20000);

        Stopwatch sw = new Stopwatch();
        Stopwatch tformSw = new Stopwatch();

        for (Entry<Key, Value> input : inputIterable) {
            tformSw.start();

            MultimapRecord r = func.apply(input);
            tformSource.add(r);

            tformSw.stop();

            loadCountsForRecord(counts, r);
            recordsReturned++;
        }

        sw.start();
        this.sorts.addResults(id, tformSource);
        sw.stop();

        long actualNumResults = tformSource.size();

        System.out.println(Thread.currentThread().getName() + ": Took " + tformSw + " transforming and " + sw
                + " to store " + recordsReturned + " records");
        logTiming(actualNumResults, tformSw.elapsed(TimeUnit.MILLISECONDS), "transformInput");
        logTiming(actualNumResults, sw.elapsed(TimeUnit.MILLISECONDS), "ingest");

        bs.close();

        Random r = new Random();
        int max = r.nextInt(10) + 1;

        // Run a bunch of queries
        for (int count = 0; count < max; count++) {
            long resultCount;
            String name;
            int i = r.nextInt(9);

            if (0 == i) {
                resultCount = docIdFetch(id, counts, actualNumResults);
                name = "docIdFetch";
            } else if (1 == i) {
                resultCount = columnFetch(id, REVISION_ID, counts, actualNumResults);
                name = "revisionIdFetch";
            } else if (2 == i) {
                resultCount = columnFetch(id, PAGE_ID, counts, actualNumResults);
                name = "pageIdFetch";
            } else if (3 == i) {
                groupBy(id, REVISION_ID, counts, actualNumResults);
                // no sense to verify here
                resultCount = recordsReturned;
                name = "groupByRevisionId";
            } else if (4 == i) {
                groupBy(id, PAGE_ID, counts, actualNumResults);
                // no sense to verify here
                resultCount = recordsReturned;
                name = "groupByPageId";
            } else if (5 == i) {
                resultCount = columnFetch(id, CONTRIBUTOR_USERNAME, counts, actualNumResults);
                name = "contributorUsernameFetch";
            } else if (6 == i) {
                groupBy(id, CONTRIBUTOR_USERNAME, counts, actualNumResults);
                // no sense to verify here
                resultCount = recordsReturned;
                name = "groupByContributorUsername";
            } else if (7 == i) {
                resultCount = columnFetch(id, CONTRIBUTOR_ID, counts, actualNumResults);
                name = "contributorIdFetch";
            } else {//if (8 == i) {
                groupBy(id, CONTRIBUTOR_ID, counts, actualNumResults);
                // no sense to verify here
                resultCount = recordsReturned;
                name = "groupByContributorID";
            }
        }
        System.out.println(Thread.currentThread().getName() + ": not deleting " + id);
        // Delete the results
        sw = new Stopwatch();

        sw.start();

        this.sorts.delete(id);
        sw.stop();

        System.out.println(Thread.currentThread().getName() + ": Took " + sw.toString() + " to delete results");
        logTiming(actualNumResults, sw.elapsed(TimeUnit.MILLISECONDS), "deleteResults");

        iters++;
    }

    this.sorts.close();
}

From source file:fr.inria.eventcloud.overlay.can.StaticLoadBalancingTestBuilder.java

public Test build() {

    return new Test() {

        private static final String CENTROID_SHORT_RDF_TERM_PREFIX = "http://aaa";

        private static final String CENTROID_LONG_RDF_TERM_PREFIX = "http://zzz";

        @Override
        protected void _execute() throws EventCloudIdNotManaged, NetworkAlreadyJoinedException,
                FileNotFoundException, PeerNotActivatedException {

            if (StaticLoadBalancingTestBuilder.this.enableLoadBalancing) {
                EventCloudProperties.STATIC_LOAD_BALANCING.setValue(true);
            }

            EventCloudProperties.RECORD_STATS_MISC_DATASTORE.setValue(true);

            if (StaticLoadBalancingTestBuilder.this.statsRecorderClass != null) {
                EventCloudProperties.STATS_RECORDER_CLASS
                        .setValue(StaticLoadBalancingTestBuilder.this.statsRecorderClass);
            }

            this.eventCloudId = this.deployer.newEventCloud(1, 1);

            SemanticPeer firstPeer = this.deployer.getRandomSemanticPeer(this.eventCloudId);

            final PutGetApi putgetProxy = ProxyFactory.newPutGetProxy(this.deployer.getEventCloudsRegistryUrl(),
                    this.eventCloudId);

            final Stopwatch stopwatch = Stopwatch.createUnstarted();

            Node graph = null;

            if (StaticLoadBalancingTestBuilder.this.trigResource == null) {
                if (this.simulateCompoundEvents()) {
                    graph = NodeGenerator.randomUri(StaticLoadBalancingTestBuilder.this.rdfTermSize);
                }

                int tmpNbQuadsToInsert = StaticLoadBalancingTestBuilder.this.nbQuadsToInsert;
                if (this.isCentroidStatsRecorderUsed()
                        && StaticLoadBalancingTestBuilder.this.nbPeersToInject > 0) {
                    tmpNbQuadsToInsert = StaticLoadBalancingTestBuilder.this.nbQuadsToInsert / 3 * 2;
                }

                for (int i = 0; i < tmpNbQuadsToInsert; i++) {
                    Quadruple quad = null;

                    if (this.simulateCompoundEvents()
                            && i % StaticLoadBalancingTestBuilder.this.nbQuadsPerCompoundEvent == 0) {
                        if (this.isCentroidStatsRecorderUsed()
                                && StaticLoadBalancingTestBuilder.this.nbPeersToInject > 1) {
                            graph = NodeGenerator.randomUri(CENTROID_SHORT_RDF_TERM_PREFIX,
                                    StaticLoadBalancingTestBuilder.this.rdfTermSize);
                        } else {
                            graph = NodeGenerator.randomUri(StaticLoadBalancingTestBuilder.this.rdfTermSize);
                        }
                    }

                    quad = this.buildQuadruple(graph, StaticLoadBalancingTestBuilder.this.rdfTermSize);

                    stopwatch.start();
                    putgetProxy.add(quad);
                    stopwatch.stop();
                }
            } else {
                List<Quadruple> quads = StaticLoadBalancingTestBuilder.this
                        .loadEvents(StaticLoadBalancingTestBuilder.this.trigResource);
                StaticLoadBalancingTestBuilder.this.nbQuadsToInsert = quads.size();

                LOG.info("{} quadruples loaded from {}", quads.size(),
                        StaticLoadBalancingTestBuilder.this.trigResource);

                for (Quadruple q : quads) {
                    stopwatch.start();
                    putgetProxy.add(q);
                    stopwatch.stop();
                }
            }

            if (StaticLoadBalancingTestBuilder.this.insertSkewedData && this.isCentroidStatsRecorderUsed()
                    && StaticLoadBalancingTestBuilder.this.nbPeersToInject > 0) {
                // add 1/3 of the data which are 10 times longer
                int longRdfTermSize = StaticLoadBalancingTestBuilder.this.rdfTermSize * 10;

                if (this.simulateCompoundEvents()) {
                    graph = NodeGenerator.randomUri(CENTROID_LONG_RDF_TERM_PREFIX, longRdfTermSize);
                }

                for (int i = 0; i < StaticLoadBalancingTestBuilder.this.nbQuadsToInsert / 3; i++) {
                    Quadruple quad = null;

                    if (this.simulateCompoundEvents()
                            && i % StaticLoadBalancingTestBuilder.this.nbQuadsPerCompoundEvent == 0) {
                        graph = NodeGenerator.randomUri(CENTROID_LONG_RDF_TERM_PREFIX + longRdfTermSize);
                    }

                    quad = this.buildQuadruple(graph, longRdfTermSize);

                    stopwatch.start();
                    putgetProxy.add(quad);
                    stopwatch.stop();
                }
            }

            LOG.info("It took {} to insert {} quadruples", stopwatch.toString(),
                    StaticLoadBalancingTestBuilder.this.nbQuadsToInsert);

            this.executionTime = stopwatch.elapsed(TimeUnit.MILLISECONDS);

            if (StaticLoadBalancingTestBuilder.this.nbPeersToInject > 0) {
                LOG.info("Before join, first peer dump:\n" + firstPeer.dump());

                for (int i = 0; i < StaticLoadBalancingTestBuilder.this.nbPeersToInject; i++) {
                    long maxNumQuads = -1;
                    Peer electedPeer = null;
                    List<Peer> peers = this.deployer.getRandomSemanticTracker(this.eventCloudId).getPeers();

                    // we select the peer which has the higher number of
                    // quadruples in the misc datastore in order to
                    // perform the next split
                    for (Peer p : peers) {
                        GetStatsRecordeResponseOperation response = (GetStatsRecordeResponseOperation) PAFuture
                                .getFutureValue(p.receive(new GetStatsRecorderOperation()));
                        if (response.getStatsRecorder().getNbQuadruples() > maxNumQuads) {
                            maxNumQuads = response.getStatsRecorder().getNbQuadruples();
                            electedPeer = p;
                        }
                    }

                    Peer newPeer = SemanticFactory.newSemanticPeer(new SemanticOverlayProvider(true));

                    newPeer.join(electedPeer);

                    this.deployer.getRandomSemanticTracker(this.eventCloudId).storePeer(newPeer);

                    LOG.info("Join operation " + (i + 1));
                }

                LOG.info("After injections, other peers dump:\n");
                for (Peer p : this.deployer.getRandomSemanticTracker(this.eventCloudId).getPeers()) {
                    LOG.info(p.dump());
                }

                if (StaticLoadBalancingTestBuilder.this.nbLookupsAfterJoinOperations > 0) {
                    for (int i = 0; i < StaticLoadBalancingTestBuilder.this.nbLookupsAfterJoinOperations; i++) {
                        // long size =
                        putgetProxy.find(QuadruplePattern.ANY).size();

                        // Assert.assertEquals(
                        // StaticLoadBalancingTestBuilder.this.nbQuadsToInsert,
                        // size);
                    }
                }
            } else {
                LOG.info("Peer dump:\n" + firstPeer.dump());
            }

            ComponentUtils.terminateComponent(putgetProxy);
        }

        private Quadruple buildQuadruple(Node graph, int rdfTermSize) {
            if (this.simulateCompoundEvents()) {
                if (this.isCentroidStatsRecorderUsed()
                        && StaticLoadBalancingTestBuilder.this.nbPeersToInject > 1) {
                    if (rdfTermSize > StaticLoadBalancingTestBuilder.this.rdfTermSize) {
                        return QuadrupleGenerator.randomWithoutLiteral(graph, CENTROID_LONG_RDF_TERM_PREFIX,
                                rdfTermSize);
                    } else {
                        return QuadrupleGenerator.randomWithoutLiteral(graph, CENTROID_SHORT_RDF_TERM_PREFIX,
                                rdfTermSize);
                    }
                } else {
                    if (graph == null) {
                        return QuadrupleGenerator.randomWithoutLiteral(rdfTermSize);
                    } else {
                        return QuadrupleGenerator.randomWithoutLiteral(graph, rdfTermSize);
                    }
                }
            } else {
                if (this.isCentroidStatsRecorderUsed()
                        && StaticLoadBalancingTestBuilder.this.nbPeersToInject > 1) {
                    if (rdfTermSize > StaticLoadBalancingTestBuilder.this.rdfTermSize) {
                        return QuadrupleGenerator.randomWithoutLiteral(CENTROID_LONG_RDF_TERM_PREFIX,
                                rdfTermSize);
                    } else {
                        return QuadrupleGenerator.randomWithoutLiteral(CENTROID_SHORT_RDF_TERM_PREFIX,
                                rdfTermSize);
                    }
                } else {
                    if (graph == null) {
                        return QuadrupleGenerator.randomWithoutLiteral(rdfTermSize);
                    } else {
                        return QuadrupleGenerator.randomWithoutLiteral(graph, rdfTermSize);
                    }
                }
            }
        }

        private boolean isCentroidStatsRecorderUsed() {
            return (StaticLoadBalancingTestBuilder.this.statsRecorderClass != null)
                    && (StaticLoadBalancingTestBuilder.this.statsRecorderClass
                            .isAssignableFrom(CentroidStatsRecorder.class));
        }

        private boolean simulateCompoundEvents() {
            return StaticLoadBalancingTestBuilder.this.nbQuadsPerCompoundEvent != -1;
        }

    };

}

From source file:eu.project.ttc.tools.cli.TermSuiteTerminoCLI.java

private void run(String[] args) throws IOException, UIMAException, UnsupportedEncodingException {
    Stopwatch sw = Stopwatch.createStarted();

    // create the Options
    Options options = declareOptions();

    try {
        // Parse and set CL options
        CommandLine line = new PosixParser().parse(options, args, false);
        readArguments(line);
        if (line.hasOption(NO_LOGGING))
            TermSuiteCLIUtils.disableLogging();
        else if (line.hasOption(DEBUG))
            TermSuiteCLIUtils.setGlobalLogLevel("debug");
        else if (line.hasOption(TRACE))
            TermSuiteCLIUtils.setGlobalLogLevel("trace");
        else
            TermSuiteCLIUtils.setGlobalLogLevel("info");

        TermSuiteCLIUtils.logCommandLineOptions(line);

        TermSuitePipeline pipeline = TermSuitePipeline.create(language.getCode());

        switch (collectionMode) {
        case INLINE_TEXT:
            pipeline.setInlineString(inlineText);
            break;
        case FILESYSTEM:
            pipeline.setCollection(corpusType, corpusPath, encoding);
            break;
        case ISTEX_API:
            pipeline.setIstexCollection(istexAPIUrl.get(), istexIds.get());
            break;
        }

        // resource
        if (resourcePack.isPresent()) {
            if (resourcePack.get().endsWith(".jar"))
                pipeline.setResourceJar(resourcePack.get());
            else
                pipeline.setResourceDir(resourcePack.get());
        }

        // mongodb
        if (mongoStoreDBURL.isPresent())
            pipeline.setMongoDBOccurrenceStore(mongoStoreDBURL.get());

        // tokenizer
        pipeline.aeWordTokenizer();

        // tagger
        if (tagger == Tagger.TreeTagger)
            pipeline.setTreeTaggerHome(taggerHome).aeTreeTagger();
        else if (tagger == Tagger.Mate)
            pipeline.setMateModelPath(taggerHome).aeMateTaggerLemmatizer();

        // Filter urlsFilter
        pipeline.aeUrlFilter();

        // stemmer
        pipeline.aeStemmer();

        // regex spotter
        pipeline.setSpotWithOccurrences(spotWithOccurrences);
        pipeline.aeRegexSpotter();

        //export Json CAS spotter
        if (jsonCasFile.isPresent())
            pipeline.haeJsonCasExporter(jsonCasFile.get());
        // filter stop words
        pipeline.aeStopWordsFilter();

        // specificity computer
        pipeline.aeSpecificityComputer();

        // compost (morphology)
        if (compostAlpha.isPresent())
            pipeline.setCompostCoeffs(compostAlpha.get(), compostBeta.get(), compostGamma.get(),
                    compostDelta.get());
        if (compostMinComponentSize.isPresent())
            pipeline.setCompostMinComponentSize(compostMinComponentSize.get());
        if (compostMaxComponentNum.isPresent())
            pipeline.setCompostMaxComponentNum(compostMaxComponentNum.get());
        if (compostScoreThreshold.isPresent())
            pipeline.setCompostScoreThreshold(compostScoreThreshold.get());
        if (compostSimilarityThreshold.isPresent())
            pipeline.setCompostSegmentSimilarityThreshold(compostSimilarityThreshold.get());
        pipeline.aeCompostSplitter();

        // syntactic variant gathering
        pipeline.aeSyntacticVariantGatherer();

        // graphical variant gathering
        pipeline.setGraphicalVariantSimilarityThreshold(graphicalSimilarityThreshold);
        pipeline.aeGraphicalVariantGatherer();

        if (periodicFilteringProperty.isPresent())
            pipeline.aeMaxSizeThresholdCleaner(periodicFilteringProperty.get(), maxSizeFilteringMaxSize);

        // contextualize
        if (contextualize) {
            pipeline.setContextualizeCoTermsType(
                    allowMWTInContexts ? OccurrenceType.ALL : OccurrenceType.SINGLE_WORD)
                    .aeContextualizer(contextScope, contextualizeAllTerms);

        }

        pipeline.aeExtensionDetector().aeScorer().aeRanker(TermProperty.SPECIFICITY, true);

        // filtering
        if (cleaningThreshold.isPresent()) {
            pipeline.setKeepVariantsWhileCleaning(keepVariantsWhileCleaning);
            pipeline.aeThresholdCleaner(cleaningProperty.get(), cleaningThreshold.get());
        } else if (cleaningTopN.isPresent()) {
            pipeline.setKeepVariantsWhileCleaning(keepVariantsWhileCleaning);
            pipeline.aeTopNCleaner(cleaningProperty.get(), cleaningTopN.get());
        }

        // stats
        pipeline.haeCasStatCounter("at end of pipeline");

        // Export
        if (tsvFile.isPresent()) {
            if (tsvProperties.isPresent()) {
                pipeline.setTsvExportProperties(tsvProperties.get());
                pipeline.setTsvShowScores(tsvShowVariantScores);
            } else
                pipeline.setTsvExportProperties(TermProperty.PILOT, TermProperty.FREQUENCY);
            pipeline.haeTsvExporter(tsvFile.get());

        }
        if (tbxFile.isPresent())
            pipeline.haeTbxExporter(tbxFile.get());
        if (jsonFile.isPresent()) {
            pipeline.setExportJsonWithContext(contextualize);
            pipeline.setExportJsonWithOccurrences(true);
            if (mongoStoreSoftLinked)
                pipeline.linkMongoStore();
            pipeline.haeJsonExporter(jsonFile.get());
        }

        // run the pipeline
        final String termIndexName = "ScriptTermIndex_" + System.currentTimeMillis();
        if (collectionMode == CollectionMode.INLINE_TEXT) {
            LOGGER.info("Running TermSuite pipeline (inline mode)");
            JCas cas = JCasFactory.createJCas();
            cas.setDocumentText(inlineText);
            cas.setDocumentLanguage(language.getCode());
            pipeline.run(cas);
            System.err.flush();
            System.out.println("Term index: ");
            TermIndex index = (TermIndex) TermSuiteResourceManager.getInstance().get(termIndexName);
            TermUtils.showIndex(index, System.out, watch);
        } else {
            LOGGER.info("Running TermSuite pipeline in corpus mode");
            pipeline.run();
            if (watch.isPresent())
                TermUtils.showIndex((TermIndex) TermSuiteResourceManager.getInstance().get(termIndexName),
                        new PrintStream(System.err, true, "UTF-8"), watch);
        }
        LOGGER.info("Script executed in " + sw.toString());

    } catch (ParseException e) {
        TermSuiteCLIUtils.printUsage(e, USAGE, options);
    }
}