Example usage for com.google.common.base Stopwatch reset

Introduction

On this page you can find example usages of the com.google.common.base.Stopwatch method reset().

Prototype

public Stopwatch reset() 

Document

Sets the elapsed time for this stopwatch to zero, and places it in a stopped state.
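
As a quick orientation before the real-world examples below, here is a minimal sketch (not taken from any of the projects listed) of what reset() does: the elapsed time drops back to zero and the stopwatch ends up stopped, so it must be started again before it continues counting. The sleeps stand in for real work.

import java.util.concurrent.TimeUnit;
import com.google.common.base.Stopwatch;

public class StopwatchResetDemo {
    public static void main(String[] args) throws InterruptedException {
        Stopwatch stopwatch = Stopwatch.createStarted();
        Thread.sleep(100);                                   // stand-in for real work
        System.out.println("first phase: " + stopwatch);     // roughly 100 ms

        stopwatch.reset();                                   // elapsed time is zero, stopwatch is stopped
        System.out.println("after reset: " + stopwatch);     // prints a zero elapsed time

        stopwatch.start();                                   // reset() returns this, so reset().start() also chains
        Thread.sleep(50);                                    // stand-in for more work
        System.out.println("second phase: " + stopwatch.elapsed(TimeUnit.MILLISECONDS) + " ms");
    }
}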

Usage

From source file:es.usc.citius.composit.core.composition.search.NaiveForwardServiceDiscoverer.java

public ServiceMatchNetwork<E, T> search(Signature<E> signature) {
    Set<E> availableInputs = new HashSet<E>(signature.getInputs());
    Set<E> newOutputs = new HashSet<E>(signature.getInputs());
    Set<Operation<E>> usedServices = new HashSet<Operation<E>>();
    List<Set<Operation<E>>> leveledOps = new LinkedList<Set<Operation<E>>>();

    boolean checkExpectedOutputs = !signature.getOutputs().isEmpty();
    boolean stop;

    Stopwatch timer = Stopwatch.createStarted();
    Stopwatch levelTimer = Stopwatch.createUnstarted();
    int level = 0;
    do {
        HashSet<Operation<E>> candidates = new HashSet<Operation<E>>();
        levelTimer.start();
        candidates.addAll(discovery.findOperationsConsumingSome(newOutputs));
        log.info("(Level {}) {} potential candidates selected in {}", level++, candidates.size(),
                levelTimer.toString());
        // Remove services that cannot be invoked with the available inputs
        for (Iterator<Operation<E>> it = candidates.iterator(); it.hasNext();) {
            Operation<E> candidate = it.next();
            Set<E> matched = matcher.partialMatch(availableInputs, candidate.getSignature().getInputs())
                    .getTargetElements();
            // Invokable?
            if (matched.equals(candidate.getSignature().getInputs())) {
                // Invokable operation, check if it was used previously
                boolean isNew = usedServices.add(candidate);
                if (!isNew)
                    it.remove();
            } else {
                it.remove();
            }
        }
        log.info("\t + [{}] operations selected for this level in {}: {}", candidates.size(),
                levelTimer.toString(), candidates);

        // Collect the new outputs of the new candidates
        newOutputs = Operations.outputs(candidates);
        availableInputs.addAll(newOutputs);
        Set<E> matchedOutputs = matcher.partialMatch(availableInputs, signature.getOutputs())
                .getTargetElements();

        // Add the discovered ops
        if (!candidates.isEmpty())
            leveledOps.add(candidates);

        log.debug("\t + Available inputs: {}, new outputs: {}", availableInputs.size(), newOutputs.size());
        // Stop condition. Stop if there are no more candidates and/or expected outputs are satisfied.
        stop = (checkExpectedOutputs) ? candidates.isEmpty() || matchedOutputs.equals(signature.getOutputs())
                : candidates.isEmpty();
        levelTimer.reset();
    } while (!stop);

    // Add the source and sink operations
    Source<E> sourceOp = new Source<E>(signature.getInputs());
    Sink<E> sinkOp = new Sink<E>(signature.getOutputs());
    leveledOps.add(0, Collections.<Operation<E>>singleton(sourceOp));
    leveledOps.add(leveledOps.size(), Collections.<Operation<E>>singleton(sinkOp));
    Stopwatch networkWatch = Stopwatch.createStarted();
    // Create a service match network with the discovered services
    DirectedAcyclicSMN<E, T> matchNetwork = new DirectedAcyclicSMN<E, T>(new HashLeveledServices<E>(leveledOps),
            this.matcher);
    log.info(" > Service match network computed in {}", networkWatch.stop().toString());
    log.info("Service Match Network created with {} levels (including source and sink) and {} operations.",
            leveledOps.size(), matchNetwork.listOperations().size());
    log.info("Forward Discovery done in {}", timer.toString());
    return matchNetwork;
}
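
The example above starts levelTimer at the top of each iteration and calls reset() at the bottom, so every level is timed independently while the outer timer keeps measuring the whole search. A minimal sketch of that pattern, assuming an SLF4J-style log field and a hypothetical processLevel method:

    Stopwatch total = Stopwatch.createStarted();
    Stopwatch levelTimer = Stopwatch.createUnstarted();
    for (int level = 0; level < numberOfLevels; level++) {
        levelTimer.start();
        processLevel(level);                            // placeholder for the per-level work
        log.info("Level {} finished in {}", level, levelTimer);
        levelTimer.reset();                             // back to zero and stopped for the next iteration
    }
    log.info("All levels finished in {}", total);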

From source file:nextmethod.web.razor.editor.internal.BackgroundThread.java

private void workerLoop() {
    final boolean isEditorTracing = Debug.isDebugArgPresent(DebugArgs.EditorTracing);
    final String fileNameOnly = Filesystem.getFileName(fileName);

    Stopwatch sw = null;
    if (isEditorTracing) {
        sw = Stopwatch.createUnstarted();
    }

    try {
        RazorEditorTrace.traceLine(RazorResources().traceBackgroundThreadStart(fileNameOnly));
        ensureOnThread();
        while (!shutdownToken.isCancellationRequested()) {
            // Grab the parcel of work to do
            final WorkParcel parcel = main.getParcel();
            if (!parcel.getChanges().isEmpty()) {
                RazorEditorTrace.traceLine(RazorResources().traceChangesArrived(fileNameOnly,
                        String.valueOf(parcel.getChanges().size())));
                try {
                    DocumentParseCompleteEventArgs args = null;
                    try (CancellationTokenSource linkedCancel = CancellationTokenSource
                            .createLinkedTokenSource(shutdownToken, parcel.getCancelToken())) {
                        if (parcel != null && !linkedCancel.isCancellationRequested()) {
                            // Collect ALL changes
                            if (isEditorTracing && previouslyDiscarded != null
                                    && !previouslyDiscarded.isEmpty()) {
                                RazorEditorTrace.traceLine(RazorResources().traceCollectedDiscardedChanges(
                                        fileNameOnly, String.valueOf(parcel.getChanges().size())));
                            }
                            final Iterable<TextChange> allChanges = Iterables
                                    .concat(previouslyDiscarded != null ? previouslyDiscarded
                                            : Collections.<TextChange>emptyList(), parcel.getChanges());

                            final TextChange finalChange = Iterables.getLast(allChanges, null);
                            if (finalChange != null) {
                                if (isEditorTracing) {
                                    assert sw != null;
                                    sw.reset().start();
                                }

                                //noinspection ConstantConditions
                                final GeneratorResults results = parseChange(finalChange.getNewBuffer(),
                                        linkedCancel.getToken());

                                if (isEditorTracing) {
                                    assert sw != null;
                                    sw.stop();
                                }

                                RazorEditorTrace.traceLine(RazorResources().traceParseComplete(fileNameOnly,
                                        sw != null ? sw.toString() : "?"));

                                if (results != null && !linkedCancel.isCancellationRequested()) {
                                    // Clear discarded changes list
                                    previouslyDiscarded = Lists.newArrayList();
                                    // Take the current tree and check for differences
                                    if (isEditorTracing) {
                                        sw.reset().start();
                                    }
                                    final boolean treeStructureChanged = currentParseTree == null
                                            || BackgroundParser.treesAreDifferent(currentParseTree,
                                                    results.getDocument(), allChanges, parcel.getCancelToken());

                                    if (isEditorTracing) {
                                        sw.stop();
                                    }

                                    currentParseTree = results.getDocument();
                                    RazorEditorTrace.traceLine(RazorResources().traceTreesCompared(fileNameOnly,
                                            sw != null ? sw.toString() : "?",
                                            String.valueOf(treeStructureChanged)));

                                    // Build Arguments
                                    args = new DocumentParseCompleteEventArgs(treeStructureChanged, results,
                                            finalChange);
                                } else {
                                    // Parse completed but we were cancelled in the mean time. Add these to the discarded changes set
                                    RazorEditorTrace.traceLine(RazorResources().traceChangesDiscarded(
                                            fileNameOnly, String.valueOf(Iterables.size(allChanges))));
                                    previouslyDiscarded = Lists.newArrayList(allChanges);
                                }

                                if (Debug.isDebugArgPresent(DebugArgs.CheckTree) && args != null) {
                                    // Rewind the buffer and sanity check the line mappings
                                    finalChange.getNewBuffer().setPosition(0);
                                    final String buffer = TextExtensions.readToEnd(finalChange.getNewBuffer());
                                    final int lineCount = Iterables
                                            .size(Splitter.on(CharMatcher.anyOf("\r\n")).split(buffer));
                                    Debug.doAssert(!Iterables.any(
                                            args.getGeneratorResults().getDesignTimeLineMappingEntries(),
                                            input -> input != null
                                                    && input.getValue().getStartLine() > lineCount),
                                            "Found a design-time line mapping referring to a line outside the source file!");

                                    Debug.doAssert(
                                            !Iterables.any(args.getGeneratorResults().getDocument().flatten(),
                                                    input -> input != null
                                                            && input.getStart().getLineIndex() > lineCount),
                                            "Found a span with a line number outside the source file");
                                }
                            }
                        }
                    }
                    if (args != null) {
                        main.returnParcel(args);
                    }
                } catch (OperationCanceledException ignored) {

                }
            } else {
                RazorEditorTrace.traceLine(RazorResources().traceNoChangesArrived(fileName),
                        parcel.getChanges().size());
                Thread.yield();
            }
        }
    } catch (OperationCanceledException ignored) {
    } finally {
        RazorEditorTrace.traceLine(RazorResources().traceBackgroundThreadShutdown(fileNameOnly));
        // Clean up main thread resources
        main.close();
    }
}

From source file:es.usc.citius.composit.core.composition.optimization.BackwardMinimizationOptimizer.java

@Override
public ServiceMatchNetwork<E, T> optimize(ServiceMatchNetwork<E, T> network) {
    Stopwatch globalWatch = Stopwatch.createStarted();
    Stopwatch localWatch = Stopwatch.createUnstarted();
    Set<E> newInputs = new HashSet<E>();
    List<Set<Operation<E>>> optimized = new ArrayList<Set<Operation<E>>>(network.numberOfLevels());
    log.debug("Starting service-backward optimization...");
    localWatch.start();
    for (int i = network.numberOfLevels() - 1; i >= 0; i--) {
        Set<Operation<E>> current = network.getOperationsAtLevel(i);
        log.debug(" > Analyzing network level {} : {}", i, current);
        Set<Operation<E>> optimizedSet = new HashSet<Operation<E>>();
        Set<E> futureInputs = new HashSet<E>();
        // Find all services that produces at least one of the required inputs. If new inputs is
        // empty, then select all
        for (Operation<E> op : current) {
            log.debug("\t\tChecking operation {}", op.getID());
            if (newInputs.isEmpty()) {
                futureInputs.addAll(op.getSignature().getInputs());
                optimizedSet.add(op);
                log.debug("\t\t+ {} selected as a mandatory operation", op.getID());
            } else {
                boolean used = false;
                next: for (E output : op.getSignature().getOutputs()) {
                    for (E input : newInputs) {
                        used = network.match(output, input) != null;
                        if (used) {
                            log.debug(
                                    "\t\t+ Operation {} marked as useful (match detected between output {} and input {})",
                                    op.getID(), output, input);
                            optimizedSet.add(op);
                            // Update new inputs
                            futureInputs.addAll(op.getSignature().getInputs());
                            break next;
                        }
                    }
                }
                if (!used)
                    log.debug("\t\t- Operation {} marked as useless", op.getID());
            }
            //log.debug(" Inputs for the next iteration: {}", futureInputs);
        }
        newInputs.addAll(futureInputs);
        optimized.add(optimizedSet);
    }
    Collections.reverse(optimized);
    // Create a new match network
    localWatch.reset().start();
    ServiceMatchNetwork<E, T> optimizedNetwork = new DirectedAcyclicSMN<E, T>(
            new HashLeveledServices<E>(optimized), network);
    localWatch.stop();
    log.debug(" > Optimized match network created in {}", localWatch.toString());
    log.debug("Backward Optimization done in {}. Size before/after {}/{}", globalWatch.stop().toString(),
            network.listOperations().size(), optimizedNetwork.listOperations().size());
    // Create a new optimized service match network
    return optimizedNetwork;
}

From source file:com.thinkbiganalytics.feedmgr.service.feed.DefaultFeedManagerFeedService.java

private void saveFeed(final FeedMetadata feed) {
    if (StringUtils.isBlank(feed.getId())) {
        feed.setIsNew(true);
    }
    metadataAccess.commit(() -> {
        Stopwatch stopwatch = Stopwatch.createStarted();
        List<? extends HadoopSecurityGroup> previousSavedSecurityGroups = null;
        // Store the old security groups before saving beccause we need to compare afterward
        if (feed.isNew()) {
            Feed existing = feedProvider.findBySystemName(feed.getCategory().getSystemName(),
                    feed.getSystemFeedName());
            // Since we know this is expected to be new check if the category/feed name combo is already being used.
            if (existing != null) {
                throw new DuplicateFeedNameException(feed.getCategoryName(), feed.getFeedName());
            }
        } else {
            Feed previousStateBeforeSaving = feedProvider.findById(feedProvider.resolveId(feed.getId()));
            Map<String, String> userProperties = previousStateBeforeSaving.getUserProperties();
            previousSavedSecurityGroups = previousStateBeforeSaving.getSecurityGroups();
        }

        //if this is the first time saving this feed create a new one
        Feed domainFeed = feedModelTransform.feedToDomain(feed);

        if (domainFeed.getState() == null) {
            domainFeed.setState(Feed.State.ENABLED);
        }
        stopwatch.stop();
        log.debug("Time to transform the feed to a domain object for saving: {} ms",
                stopwatch.elapsed(TimeUnit.MILLISECONDS));
        stopwatch.reset();

        //initially save the feed
        if (feed.isNew()) {
            stopwatch.start();
            domainFeed = feedProvider.update(domainFeed);
            stopwatch.stop();
            log.debug("Time to save the New feed: {} ms", stopwatch.elapsed(TimeUnit.MILLISECONDS));
            stopwatch.reset();
        }

        final String domainId = domainFeed.getId().toString();
        final String feedName = FeedNameUtil.fullName(domainFeed.getCategory().getSystemName(),
                domainFeed.getName());

        // Build preconditions
        stopwatch.start();
        assignFeedDependencies(feed, domainFeed);
        stopwatch.stop();
        log.debug("Time to assignFeedDependencies: {} ms", stopwatch.elapsed(TimeUnit.MILLISECONDS));
        stopwatch.reset();

        //Assign the datasources
        stopwatch.start();
        assignFeedDatasources(feed, domainFeed);
        stopwatch.stop();
        log.debug("Time to assignFeedDatasources: {} ms", stopwatch.elapsed(TimeUnit.MILLISECONDS));
        stopwatch.reset();

        stopwatch.start();
        boolean isStream = feed.getRegisteredTemplate() != null ? feed.getRegisteredTemplate().isStream()
                : false;
        Long timeBetweenBatchJobs = feed.getRegisteredTemplate() != null
                ? feed.getRegisteredTemplate().getTimeBetweenStartingBatchJobs()
                : 0L;
        //sync the feed information to ops manager
        metadataAccess.commit(() -> opsManagerFeedProvider.save(opsManagerFeedProvider.resolveId(domainId),
                feedName, isStream, timeBetweenBatchJobs));

        stopwatch.stop();
        log.debug("Time to sync feed data with Operations Manager: {} ms",
                stopwatch.elapsed(TimeUnit.MILLISECONDS));
        stopwatch.reset();

        // Update hadoop security group polices if the groups changed
        if (!feed.isNew()
                && !ListUtils.isEqualList(previousSavedSecurityGroups, domainFeed.getSecurityGroups())) {
            stopwatch.start();
            List<? extends HadoopSecurityGroup> securityGroups = domainFeed.getSecurityGroups();
            List<String> groupsAsCommaList = securityGroups.stream().map(group -> group.getName())
                    .collect(Collectors.toList());
            hadoopAuthorizationService.updateSecurityGroupsForAllPolicies(feed.getSystemCategoryName(),
                    feed.getSystemFeedName(), groupsAsCommaList, domainFeed.getProperties());
            stopwatch.stop();
            log.debug("Time to update hadoop security groups: {} ms", stopwatch.elapsed(TimeUnit.MILLISECONDS));
            stopwatch.reset();
        }

        // Update Hive metastore
        stopwatch.start();
        final boolean hasHiveDestination = domainFeed.getDestinations().stream()
                .map(FeedDestination::getDatasource).filter(DerivedDatasource.class::isInstance)
                .map(DerivedDatasource.class::cast)
                .anyMatch(datasource -> "HiveDatasource".equals(datasource.getDatasourceType()));
        if (hasHiveDestination) {
            try {
                feedHiveTableService.updateColumnDescriptions(feed);
            } catch (final DataAccessException e) {
                log.warn("Failed to update column descriptions for feed: {}",
                        feed.getCategoryAndFeedDisplayName(), e);
            }
        }
        stopwatch.stop();
        log.debug("Time to update hive metastore: {} ms", stopwatch.elapsed(TimeUnit.MILLISECONDS));
        stopwatch.reset();

        // Update Kylo metastore
        stopwatch.start();
        domainFeed = feedProvider.update(domainFeed);
        stopwatch.stop();
        log.debug("Time to call feedProvider.update: {} ms", stopwatch.elapsed(TimeUnit.MILLISECONDS));
        stopwatch.reset();
    }, (e) -> {
        if (feed.isNew() && StringUtils.isNotBlank(feed.getId())) {
            //Rollback ops Manager insert if it is newly created
            metadataAccess.commit(() -> {
                opsManagerFeedProvider.delete(opsManagerFeedProvider.resolveId(feed.getId()));
            });
        }
    });

}
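
This example reuses a single stopwatch across the phases of the save with a repeated stop / log / reset / start sequence. A minimal sketch of that cycle, with placeholder phase methods and an SLF4J-style log field:

    Stopwatch stopwatch = Stopwatch.createStarted();
    transformToDomain();                                // placeholder for phase 1
    stopwatch.stop();
    log.debug("Phase 1 took {} ms", stopwatch.elapsed(TimeUnit.MILLISECONDS));
    stopwatch.reset();                                  // zero and stopped

    stopwatch.start();                                  // same instance, reused for the next phase
    persistFeed();                                      // placeholder for phase 2
    stopwatch.stop();
    log.debug("Phase 2 took {} ms", stopwatch.elapsed(TimeUnit.MILLISECONDS));
    stopwatch.reset();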

From source file:com.b2international.snowowl.snomed.datastore.request.rf2.SnomedRf2ImportRequest.java

void doImport(final String userId, final BranchContext context, final File rf2Archive) throws Exception {
    try (final DB db = createDb()) {
        final Map<String, Long> storageKeysByComponent = db
                .hashMap("storageKeysByComponent", Serializer.STRING, Serializer.LONG).create();
        final Map<String, Long> storageKeysByRefSet = db
                .hashMap("storageKeysByRefSet", Serializer.STRING, Serializer.LONG).create();

        // TODO in case of FULL or SNAPSHOT import load all component storage key pairs into the above Maps, so we can avoid loading them during tx commit
        if (!isLoadOnDemandEnabled()) {
            Stopwatch w = Stopwatch.createStarted();
            System.err.println("Loading available components IDs and StorageKeys took...");
            for (Class<?> type : ImmutableList.of(SnomedConceptDocument.class,
                    SnomedDescriptionIndexEntry.class, SnomedRelationshipIndexEntry.class,
                    SnomedRefSetMemberIndexEntry.class)) {
                for (Hits<Map> hits : context.service(RevisionSearcher.class)
                        .scroll(Query.select(Map.class).from(type)
                                .fields(RevisionDocument.Fields.ID, Revision.STORAGE_KEY,
                                        SnomedConceptDocument.Fields.REFSET_STORAGEKEY)
                                .where(Expressions.matchAll()).limit(10_000).build())) {
                    for (Map hit : hits) {
                        final String componentId = (String) hit.get(RevisionDocument.Fields.ID);
                        final long storageKey = (long) hit.get(Revision.STORAGE_KEY);
                        storageKeysByComponent.put(componentId, storageKey);
                        // add refset storagekey if this concept has a non negative value
                        if (type == SnomedConceptDocument.class
                                && hit.containsKey(SnomedConceptDocument.Fields.REFSET_STORAGEKEY)) {
                            final long refSetStorageKey = (long) hit
                                    .get(SnomedConceptDocument.Fields.REFSET_STORAGEKEY);
                            if (refSetStorageKey != -1L) {
                                storageKeysByRefSet.put(componentId, refSetStorageKey);
                            }
                        }
                    }
                }
            }
            System.err.println("Loading available components IDs and StorageKeys took: " + w);
        }

        // create executor service to parallel update the underlying index store

        final Rf2EffectiveTimeSlices effectiveTimeSlices = new Rf2EffectiveTimeSlices(db,
                storageKeysByComponent, storageKeysByRefSet, isLoadOnDemandEnabled());
        Stopwatch w = Stopwatch.createStarted();
        read(rf2Archive, effectiveTimeSlices, storageKeysByComponent, storageKeysByRefSet);
        System.err.println("Preparing RF2 import took: " + w);
        w.reset().start();

        for (Rf2EffectiveTimeSlice slice : effectiveTimeSlices.consumeInOrder()) {
            slice.doImport(userId, context, createVersions);
        }
    }
}

From source file:org.n52.youngs.control.impl.SingleThreadBulkRunner.java

@Override
public Report load(final Sink sink) {
    this.sink = sink;
    Objects.nonNull(source);
    Objects.nonNull(mapper);
    Objects.nonNull(this.sink);

    log.info("Starting harvest from {} to {} with {}", source, this.sink, mapper);
    Report report = new ReportImpl();

    try {
        boolean prepareSink = sink.prepare(mapper.getMapper());
        if (!prepareSink) {
            String msg = "The sink could not be prepared. Stopping load, please check the logs.";
            log.error(msg);
            report.addMessage(msg);
            return report;
        }
    } catch (SinkError e) {
        log.error("Problem preparing sink", e);
        report.addMessage(String.format("Problem preparing sink: %s", e.getMessage()));
        return report;
    }

    final Stopwatch timer = Stopwatch.createStarted();
    long pageStart = startPosition;
    long count = source.getRecordCount();
    final long limit = Math.min(recordsLimit + startPosition, count);

    final Stopwatch sourceTimer = Stopwatch.createUnstarted();
    final Stopwatch mappingTimer = Stopwatch.createUnstarted();
    final Stopwatch sinkTimer = Stopwatch.createUnstarted();
    final Stopwatch currentBulkTimer = Stopwatch.createUnstarted();
    double bulkTimeAvg = 0d;
    long runNumber = 0;

    while (pageStart <= limit) {
        currentBulkTimer.start();

        long recordsLeft = limit - pageStart + 1;
        long size = Math.min(recordsLeft, bulkSize);
        if (size <= 0) {
            break;
        }
        log.info("### [{}] Requesting {} records from {} starting at {}, last requested record will be {} ###",
                runNumber, size, source.getEndpoint(), pageStart, limit);

        try {
            sourceTimer.start();
            Collection<SourceRecord> records = source.getRecords(pageStart, size, report);
            sourceTimer.stop();

            log.debug("Mapping {} retrieved records.", records.size());
            mappingTimer.start();
            List<SinkRecord> mappedRecords = records.stream().map(record -> {
                try {
                    return mapper.map(record);
                } catch (MappingError e) {
                    report.addFailedRecord(record.toString(), "Problem during mapping: " + e.getMessage());
                    return null;
                }
            }).filter(Objects::nonNull).collect(Collectors.toList());
            mappingTimer.stop();

            log.debug("Storing {} mapped records.", mappedRecords.size());
            if (!testRun) {
                sinkTimer.start();
                mappedRecords.forEach(record -> {
                    try {
                        boolean result = sink.store(record);
                        if (result) {
                            report.addSuccessfulRecord(record.getId());
                        } else {
                            report.addFailedRecord(record.getId(), "see sink log");
                        }
                    } catch (SinkError e) {
                        report.addFailedRecord(record.toString(), "Problem during mapping: " + e.getMessage());
                    }
                });
                sinkTimer.stop();
            } else {
                log.info("TESTRUN, created documents are:\n{}", Arrays.toString(mappedRecords.toArray()));
            }

        } catch (RuntimeException e) {
            if (sourceTimer.isRunning()) {
                sourceTimer.stop();
            }
            if (mappingTimer.isRunning()) {
                mappingTimer.stop();
            }
            if (sinkTimer.isRunning()) {
                sinkTimer.stop();
            }

            String msg = String.format("Problem processing records %s to %s: %s", pageStart, pageStart + size,
                    e.getMessage());
            log.error(msg, e);
            report.addMessage(msg);
        }

        pageStart += bulkSize;

        currentBulkTimer.stop();
        bulkTimeAvg = ((bulkTimeAvg * runNumber) + currentBulkTimer.elapsed(TimeUnit.SECONDS))
                / (runNumber + 1);
        updateAndLog(runNumber, (runNumber + 1) * bulkSize, currentBulkTimer.elapsed(TimeUnit.SECONDS),
                bulkTimeAvg);
        currentBulkTimer.reset();

        runNumber++;
    }

    timer.stop();
    log.info("Completed harvesting for {} ({} failed) of {} records in {} minutes",
            report.getNumberOfRecordsAdded(), report.getNumberOfRecordsFailed(), source.getRecordCount(),
            timer.elapsed(TimeUnit.MINUTES));
    log.info("Time spent (minutes): source={}, mapping={}, sink={}", sourceTimer.elapsed(TimeUnit.MINUTES),
            mappingTimer.elapsed(TimeUnit.MINUTES), sinkTimer.elapsed(TimeUnit.MINUTES));

    return report;
}
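
Note the two ways the stopwatches are used above: sourceTimer, mappingTimer and sinkTimer are never reset, so their start()/stop() pairs accumulate time across all bulks, while currentBulkTimer is reset after every pass and therefore measures a single bulk. A minimal sketch of that contrast, with placeholder batch types and work methods:

    Stopwatch ioTotal = Stopwatch.createUnstarted();    // accumulates across all iterations
    Stopwatch perBatch = Stopwatch.createUnstarted();   // measures one iteration at a time
    for (Batch batch : batches) {
        perBatch.start();
        ioTotal.start();
        fetch(batch);                                   // placeholder I/O work
        ioTotal.stop();                                 // elapsed time keeps adding up
        process(batch);                                 // placeholder CPU work
        perBatch.stop();
        log.debug("batch handled in {}", perBatch);
        perBatch.reset();                               // start the next batch from zero
    }
    log.info("total time spent fetching: {}", ioTotal);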

From source file:es.usc.citius.composit.core.composition.search.ForwardServiceDiscoverer.java

public ServiceMatchNetwork<E, T> search(Signature<E> signature) {
    Set<E> availableInputs = new HashSet<E>(signature.getInputs());
    Set<E> newOutputs = new HashSet<E>(signature.getInputs());
    Set<E> unmatchedOutputs = new HashSet<E>(signature.getOutputs());
    Set<Operation<E>> usedServices = new HashSet<Operation<E>>();
    Map<Operation<E>, Set<E>> unmatchedInputMap = new HashMap<Operation<E>, Set<E>>();
    List<Set<Operation<E>>> leveledOps = new LinkedList<Set<Operation<E>>>();

    boolean checkExpectedOutputs = !signature.getOutputs().isEmpty();
    boolean stop;

    Stopwatch timer = Stopwatch.createStarted();
    Stopwatch levelTimer = Stopwatch.createUnstarted();
    int level = 0;
    do {
        HashSet<Operation<E>> candidates = new HashSet<Operation<E>>();
        levelTimer.start();
        candidates.addAll(discovery.findOperationsConsumingSome(newOutputs));
        log.info("(Level {}) {} potential candidates selected in {}", level++, candidates.size(),
                levelTimer.toString());
        // Remove services that cannot be invoked with the available inputs
        for (Iterator<Operation<E>> it = candidates.iterator(); it.hasNext();) {
            Operation<E> candidate = it.next();
            // Retrieve the unmatched inputs for this operation
            Set<E> unmatchedInputs = unmatchedInputMap.get(candidate);
            if (unmatchedInputs == null) {
                unmatchedInputs = candidate.getSignature().getInputs();
            }
            // Check if the new concepts match some unmatched inputs
            Set<E> matched = matcher.partialMatch(newOutputs, unmatchedInputs).getTargetElements();

            // Don't check invokability
            if (relaxedMatchCondition) {
                // Remove only if there is no match at all
                if (matched.isEmpty()) {
                    it.remove();
                } else {
                    boolean isNew = usedServices.add(candidate);
                    if (!isNew)
                        it.remove();
                }
            } else {
                // Update the unmatchedInputs
                unmatchedInputs = Sets.newHashSet(Sets.difference(unmatchedInputs, matched));
                unmatchedInputMap.put(candidate, unmatchedInputs);
                // If there are no unmatched inputs, the service is invokable!
                if (!unmatchedInputs.isEmpty()) {
                    it.remove();
                } else {
                    // Invokable operation, check if it was used previously
                    boolean isNew = usedServices.add(candidate);
                    if (!isNew)
                        it.remove();
                }
            }
        }
        log.info("\t + [{}] operations selected for this level in {}", candidates.size(),
                levelTimer.toString());
        log.debug("\t\t Candidates: {}", candidates);

        // Collect the new outputs of the new candidates
        Set<E> nextOutputs = Operations.outputs(candidates);

        // Check unmatched outputs
        Set<E> matchedOutputs = matcher.partialMatch(Sets.union(newOutputs, nextOutputs), unmatchedOutputs)
                .getTargetElements();
        //Set<Resource> matchedOutputs = matcher.matched(newOutputs, unmatchedOutputs);
        // Update the unmatched outputs
        unmatchedOutputs = Sets.newHashSet(Sets.difference(unmatchedOutputs, matchedOutputs));

        // Update for the next iteration
        availableInputs.addAll(newOutputs);
        newOutputs = nextOutputs;

        // Add the discovered ops
        if (!candidates.isEmpty())
            leveledOps.add(candidates);

        log.debug("\t + Available inputs: {}, new outputs: {}", availableInputs.size(), newOutputs.size());
        // Stop condition. Stop if there are no more candidates and/or expected outputs are satisfied.
        stop = (checkExpectedOutputs) ? candidates.isEmpty() || unmatchedOutputs.isEmpty()
                : candidates.isEmpty();
        levelTimer.reset();
    } while (!stop);

    // Add the source and sink operations
    Source<E> sourceOp = new Source<E>(signature.getInputs());
    Sink<E> sinkOp = new Sink<E>(signature.getOutputs());
    leveledOps.add(0, Collections.<Operation<E>>singleton(sourceOp));
    leveledOps.add(leveledOps.size(), Collections.<Operation<E>>singleton(sinkOp));
    Stopwatch networkWatch = Stopwatch.createStarted();
    // Create a service match network with the discovered services
    DirectedAcyclicSMN<E, T> matchNetwork = new DirectedAcyclicSMN<E, T>(new HashLeveledServices<E>(leveledOps),
            this.matcher);
    log.info(" > Service match network computed in {}", networkWatch.stop().toString());
    log.info("Service Match Network created with {} levels (including source and sink) and {} operations.",
            leveledOps.size(), matchNetwork.listOperations().size());
    log.info("Forward Discovery done in {}", timer.toString());
    this.unmatchedInputMap = unmatchedInputMap;
    return matchNetwork;
}

From source file:org.opencb.cellbase.app.transform.VariationParser.java

@Override
public void parse() throws IOException, InterruptedException, SQLException, ClassNotFoundException {

    if (!Files.exists(variationDirectoryPath) || !Files.isDirectory(variationDirectoryPath)
            || !Files.isReadable(variationDirectoryPath)) {
        throw new IOException(
                "Variation directory whether does not exist, is not a directory or cannot be read");
    }
    if (!existsZippedOrUnzippedFile(VARIATION_FILENAME)
            || isEmpty(variationDirectoryPath.resolve(VARIATION_FILENAME).toString())) {
        throw new IOException("variation.txt.gz whether does not exist, is not a directory or cannot be read");
    }

    Variation variation;

    // To speed up calculation a SQLite database is created with the IDs and file offsets,
    // file must be uncompressed for doing this.
    gunzipVariationInputFiles();

    // add idVariation to transcript_variation file
    preprocessInputFiles();

    // Open variation file, this file never gets uncompressed. It's read from gzip file
    BufferedReader bufferedReaderVariation = getBufferedReader(PREPROCESSED_VARIATION_FILENAME);

    // create buffered readers for all other input files
    createVariationFilesBufferedReaders();

    Map<String, String> seqRegionMap = VariationUtils.parseSeqRegionToMap(variationDirectoryPath);
    Map<String, String> sourceMap = VariationUtils.parseSourceToMap(variationDirectoryPath);

    initializeVariationRelatedArrays();
    Stopwatch globalStartwatch = Stopwatch.createStarted();
    Stopwatch batchWatch = Stopwatch.createStarted();
    logger.info("Parsing variation file " + variationDirectoryPath.resolve(PREPROCESSED_VARIATION_FILENAME)
            + " ...");
    long countprocess = 0;
    String line;
    while ((line = bufferedReaderVariation.readLine()) != null) {
        String[] variationFields = line.split("\t");

        int variationId = Integer.parseInt(variationFields[0]);

        List<String[]> resultVariationFeature = getVariationRelatedFields(VARIATION_FEATURE_FILE_ID,
                variationId);
        if (resultVariationFeature != null && resultVariationFeature.size() > 0) {
            String[] variationFeatureFields = resultVariationFeature.get(0);

            List<TranscriptVariation> transcriptVariation = getTranscriptVariations(variationId,
                    variationFeatureFields[0]);
            List<Xref> xrefs = getXrefs(sourceMap, variationId);

            try {
                // Preparing the variation alleles
                String[] allelesArray = getAllelesArray(variationFeatureFields);

                // For code sanity save chromosome, start, end and id
                String chromosome = seqRegionMap.get(variationFeatureFields[1]);

                if (!chromosome.contains("PATCH") && !chromosome.contains("HSCHR")
                        && !chromosome.contains("contig")) {
                    int start = (variationFeatureFields != null) ? Integer.valueOf(variationFeatureFields[2])
                            : 0;
                    int end = (variationFeatureFields != null) ? Integer.valueOf(variationFeatureFields[3]) : 0;
                    String id = (variationFields[2] != null && !variationFields[2].equals("\\N"))
                            ? variationFields[2]
                            : "";
                    String reference = (allelesArray[0] != null && !allelesArray[0].equals("\\N"))
                            ? allelesArray[0]
                            : "";
                    String alternate = (allelesArray[1] != null && !allelesArray[1].equals("\\N"))
                            ? allelesArray[1]
                            : "";

                    // Preparing frequencies
                    //List<PopulationFrequency> populationFrequencies = getPopulationFrequencies(variationId, allelesArray);
                    List<PopulationFrequency> populationFrequencies = getPopulationFrequencies(chromosome,
                            start, end, id, reference, alternate);

                    // TODO: check that variationFeatureFields is always different to null and intergenic-variant is never used
                    //List<String> consequenceTypes = (variationFeatureFields != null) ? Arrays.asList(variationFeatureFields[12].split(",")) : Arrays.asList("intergenic_variant");
                    List<String> consequenceTypes = Arrays.asList(variationFeatureFields[12].split(","));
                    String displayConsequenceType = getDisplayConsequenceType(consequenceTypes);

                    // we have all the necessary to construct the 'variation' object
                    variation = buildVariation(variationFields, variationFeatureFields, chromosome, start, end,
                            id, reference, alternate, transcriptVariation, xrefs, populationFrequencies,
                            allelesArray, consequenceTypes, displayConsequenceType);
                    fileSerializer.serialize(variation, getOutputFileName(chromosome));
                }

                if (++countprocess % 100000 == 0 && countprocess != 0) {
                    logger.info("Processed variations: " + countprocess);
                    logger.debug("Elapsed time processing batch: " + batchWatch);
                    batchWatch.reset();
                    batchWatch.start();
                }

            } catch (Exception e) {
                e.printStackTrace();
                logger.error("Error parsing variation: " + e.getMessage());
                logger.error("Last line processed: " + line);
                break;
            }
        }
        // TODO: just for testing, remove
        //if (countprocess % 100000 == 0) {
        //    break;
        //}
    }

    logger.info("Variation parsing finished");
    logger.info("Variants processed: " + countprocess);
    logger.debug("Elapsed time parsing: " + globalStartwatch);

    gzipVariationFiles(variationDirectoryPath);

    try {
        bufferedReaderVariation.close();
    } catch (Exception e) {
        e.printStackTrace();
    }
}

From source file:org.glowroot.central.repo.SchemaUpgrade.java

private void populateTraceTtSlowCountAndPointPartialPart1() throws Exception {
    logger.info("populating trace_tt_slow_count_partial and trace_tt_slow_point_partial tables"
            + " - this could take several minutes on large data sets...");
    CentralStorageConfig storageConfig = getCentralStorageConfig(session);
    dropTableIfExists("trace_tt_slow_count_partial");
    dropTableIfExists("trace_tt_slow_point_partial");
    session.createTableWithTWCS(
            "create table if not exists trace_tt_slow_count_partial"
                    + " (agent_rollup varchar, transaction_type varchar, capture_time timestamp,"
                    + " agent_id varchar, trace_id varchar, primary key ((agent_rollup,"
                    + " transaction_type), capture_time, agent_id, trace_id))",
            storageConfig.traceExpirationHours(), false, true);
    session.createTableWithTWCS(
            "create table if not exists trace_tt_slow_point_partial"
                    + " (agent_rollup varchar, transaction_type varchar, capture_time timestamp,"
                    + " agent_id varchar, trace_id varchar, duration_nanos bigint, error boolean,"
                    + " headline varchar, user varchar, attributes blob, primary key ((agent_rollup,"
                    + " transaction_type), capture_time, agent_id, trace_id))",
            storageConfig.traceExpirationHours(), false, true);
    PreparedStatement insertCountPartialPS = session.prepare(
            "insert into" + " trace_tt_slow_count_partial (agent_rollup, transaction_type, capture_time,"
                    + " agent_id, trace_id) values (?, ?, ?, ?, ?) using ttl ?");
    PreparedStatement insertPointPartialPS = session.prepare(
            "insert into" + " trace_tt_slow_point_partial (agent_rollup, transaction_type, capture_time,"
                    + " agent_id, trace_id, duration_nanos, error, headline, user, attributes) values"
                    + " (?, ?, ?, ?, ?, ?, ?, ?, ?, ?) using ttl ?");
    int ttl = getCentralStorageConfig(session).getTraceTTL();
    ResultSet results = session.read("select agent_rollup, transaction_type, capture_time,"
            + " agent_id, trace_id, duration_nanos, error, headline, user, attributes, partial"
            + " from trace_tt_slow_point");
    Queue<ListenableFuture<?>> futures = new ArrayDeque<>();
    Stopwatch stopwatch = Stopwatch.createStarted();
    int rowCount = 0;
    for (Row row : results) {
        if (!row.getBool(10)) { // partial
            // unfortunately cannot use "where partial = true allow filtering" in the query
            // above as that leads to ReadTimeoutException
            continue;
        }
        BoundStatement boundStatement = insertCountPartialPS.bind();
        int i = 0;
        copyString(row, boundStatement, i++); // agent_rollup
        copyString(row, boundStatement, i++); // transaction_type
        Date captureDate = checkNotNull(row.getTimestamp(i));
        int adjustedTTL = Common.getAdjustedTTL(ttl, captureDate.getTime(), clock);
        copyTimestamp(row, boundStatement, i++); // capture_time
        copyString(row, boundStatement, i++); // agent_id
        copyString(row, boundStatement, i++); // trace_id
        boundStatement.setInt(i++, adjustedTTL);
        futures.add(session.writeAsync(boundStatement));

        boundStatement = insertPointPartialPS.bind();
        i = 0;
        copyString(row, boundStatement, i++); // agent_rollup
        copyString(row, boundStatement, i++); // transaction_type
        copyTimestamp(row, boundStatement, i++); // capture_time
        copyString(row, boundStatement, i++); // agent_id
        copyString(row, boundStatement, i++); // trace_id
        copyLong(row, boundStatement, i++); // duration_nanos
        copyBool(row, boundStatement, i++); // error
        copyString(row, boundStatement, i++); // headline
        copyString(row, boundStatement, i++); // user
        copyBytes(row, boundStatement, i++); // attributes
        boundStatement.setInt(i++, adjustedTTL);
        futures.add(session.writeAsync(boundStatement));

        rowCount++;
        if (stopwatch.elapsed(SECONDS) > 60) {
            logger.info("processed {} records", rowCount);
            stopwatch.reset().start();
        }
        waitForSome(futures);
    }
    MoreFutures.waitForAll(futures);
    logger.info(
            "populating trace_tt_slow_count_partial and trace_tt_slow_point_partial tables" + " - complete");
}

From source file:org.glowroot.central.repo.SchemaUpgrade.java

private void populateTraceTnSlowCountAndPointPartialPart1() throws Exception {
    logger.info("populating trace_tn_slow_count_partial and trace_tn_slow_point_partial tables"
            + " - this could take several minutes on large data sets...");
    CentralStorageConfig storageConfig = getCentralStorageConfig(session);
    dropTableIfExists("trace_tn_slow_count_partial");
    dropTableIfExists("trace_tn_slow_point_partial");
    session.createTableWithTWCS("create table if not exists trace_tn_slow_count_partial"
            + " (agent_rollup varchar, transaction_type varchar, transaction_name varchar,"
            + " capture_time timestamp, agent_id varchar, trace_id varchar, primary key"
            + " ((agent_rollup, transaction_type, transaction_name), capture_time, agent_id," + " trace_id))",
            storageConfig.traceExpirationHours(), false, true);
    session.createTableWithTWCS("create table if not exists trace_tn_slow_point_partial"
            + " (agent_rollup varchar, transaction_type varchar, transaction_name varchar,"
            + " capture_time timestamp, agent_id varchar, trace_id varchar, duration_nanos"
            + " bigint, error boolean, headline varchar, user varchar, attributes blob, primary"
            + " key ((agent_rollup, transaction_type, transaction_name), capture_time,"
            + " agent_id, trace_id))", storageConfig.traceExpirationHours(), false, true);
    PreparedStatement insertCountPartialPS = session.prepare(
            "insert into" + " trace_tn_slow_count_partial (agent_rollup, transaction_type, transaction_name,"
                    + " capture_time, agent_id, trace_id) values (?, ?, ?, ?, ?, ?) using ttl ?");
    PreparedStatement insertPointPartialPS = session.prepare(
            "insert into" + " trace_tn_slow_point_partial (agent_rollup, transaction_type, transaction_name,"
                    + " capture_time, agent_id, trace_id, duration_nanos, error, headline, user,"
                    + " attributes) values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?) using ttl ?");
    int ttl = getCentralStorageConfig(session).getTraceTTL();
    ResultSet results = session.read("select agent_rollup, transaction_type,"
            + " transaction_name, capture_time, agent_id, trace_id, duration_nanos, error,"
            + " headline, user, attributes, partial from trace_tn_slow_point");
    Queue<ListenableFuture<?>> futures = new ArrayDeque<>();
    Stopwatch stopwatch = Stopwatch.createStarted();
    int rowCount = 0;
    for (Row row : results) {
        if (!row.getBool(11)) { // partial
            // unfortunately cannot use "where partial = true allow filtering" in the query
            // above as that leads to ReadTimeoutException
            continue;
        }
        BoundStatement boundStatement = insertCountPartialPS.bind();
        int i = 0;
        copyString(row, boundStatement, i++); // agent_rollup
        copyString(row, boundStatement, i++); // transaction_type
        copyString(row, boundStatement, i++); // transaction_name
        Date captureDate = checkNotNull(row.getTimestamp(i));
        int adjustedTTL = Common.getAdjustedTTL(ttl, captureDate.getTime(), clock);
        copyTimestamp(row, boundStatement, i++); // capture_time
        copyString(row, boundStatement, i++); // agent_id
        copyString(row, boundStatement, i++); // trace_id
        boundStatement.setInt(i++, adjustedTTL);
        futures.add(session.writeAsync(boundStatement));

        boundStatement = insertPointPartialPS.bind();
        i = 0;
        copyString(row, boundStatement, i++); // agent_rollup
        copyString(row, boundStatement, i++); // transaction_type
        copyString(row, boundStatement, i++); // transaction_name
        copyTimestamp(row, boundStatement, i++); // capture_time
        copyString(row, boundStatement, i++); // agent_id
        copyString(row, boundStatement, i++); // trace_id
        copyLong(row, boundStatement, i++); // duration_nanos
        copyBool(row, boundStatement, i++); // error
        copyString(row, boundStatement, i++); // headline
        copyString(row, boundStatement, i++); // user
        copyBytes(row, boundStatement, i++); // attributes
        boundStatement.setInt(i++, adjustedTTL);
        futures.add(session.writeAsync(boundStatement));

        rowCount++;
        if (stopwatch.elapsed(SECONDS) > 60) {
            logger.info("processed {} records", rowCount);
            stopwatch.reset().start();
        }
        waitForSome(futures);
    }
    MoreFutures.waitForAll(futures);
    logger.info(
            "populating trace_tn_slow_count_partial and trace_tn_slow_point_partial tables" + " - complete");
}
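
Both SchemaUpgrade examples use the same throttled progress-logging idiom: once more than a minute of processing has elapsed, a progress line is written and reset().start() rearms the stopwatch so the next line appears roughly a minute later. A minimal sketch of that idiom, with a placeholder record source and SLF4J-style logger:

    Stopwatch stopwatch = Stopwatch.createStarted();
    int rowCount = 0;
    for (Row row : results) {
        handle(row);                                    // placeholder per-row work
        rowCount++;
        if (stopwatch.elapsed(TimeUnit.SECONDS) > 60) {
            logger.info("processed {} records", rowCount);
            stopwatch.reset().start();                  // rearm: next message about a minute from now
        }
    }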