Example usage for com.google.common.base Stopwatch reset

Introduction

This page lists example usages of com.google.common.base.Stopwatch#reset collected from open-source projects.

Prototype

public Stopwatch reset() 

Document

Sets the elapsed time for this stopwatch to zero, and places it in a stopped state.
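
Before the project examples, here is a minimal sketch of that behavior (doWork() is a hypothetical placeholder): after reset() the elapsed time reads zero and the watch is stopped, so it must be started again before it can measure anything.

import com.google.common.base.Stopwatch;
import java.util.concurrent.TimeUnit;

Stopwatch stopwatch = Stopwatch.createStarted();
doWork();                                      // hypothetical workload
System.out.println(stopwatch);                 // e.g. "12.34 ms"

stopwatch.reset();                             // elapsed time back to zero, watch now stopped
assert stopwatch.elapsed(TimeUnit.MILLISECONDS) == 0;

stopwatch.start();                             // must restart before timing again
doWork();
System.out.println(stopwatch);

Because reset() returns the Stopwatch itself, the restart can also be chained as stopwatch.reset().start(), an idiom several of the examples below use.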

Usage

From source file:io.cloudex.framework.components.Processor.java

@Override
public void run() throws IOException {

    String status = null;
    Stopwatch stopwatch = Stopwatch.createUnstarted();

    VmMetaData metaData = this.getMetaData();
    CloudService cloudService = this.getCloudService();

    while (true) {
        try {
            // only process tasks if the status is empty
            if (StringUtils.isBlank(status)) {

                // set status to BUSY
                metaData.setProcessorStatus(ProcessorStatus.BUSY);
                cloudService.updateMetadata(metaData);

                // run the task
                Task task = taskFactory.getTask(metaData, cloudService);
                if (task != null) {
                    stopwatch.start();
                    log.info("Starting processor task: " + task);

                    task.run();

                    log.info("TIMER# Task " + task + " completed in: " + stopwatch);
                    stopwatch.reset();

                } else {
                    //no task is set, just set status to ready and wait for tasks
                    log.info("No task is set!");
                }

                // finished processing
                // blank the task type and set the status to READY
                metaData.clearValues();
                metaData.setProcessorStatus(ProcessorStatus.READY);

                cloudService.updateMetadata(metaData);

            } else {
                log.info("will continue waiting for instructions as status is currently: " + status);
            }

            // now wait for any change in the metadata
            log.info("Waiting for new instructions from the Coordinator");

            // FIXME better solution for race condition
            // avoid race condition
            ApiUtils.block(2);
            metaData = cloudService.getMetaData(false);
            // if we still have a status then wait, otherwise proceed
            if (StringUtils.isNotBlank(metaData.getStatus())) {
                metaData = cloudService.getMetaData(true);
            }

            // check the status in the metadata
            status = metaData.getStatus();

        } catch (Exception e) {

            log.error("An error has occurred whilst running/waiting for tasks, setting status to ERROR", e);
            // the stopwatch wasn't stopped when an exception was thrown
            stopwatch.reset();
            // try to update the Metadata to a fail status
            try {

                metaData = cloudService.getMetaData(false);
                // blank the task type and set the status to ERROR
                metaData.clearValues();
                metaData.exceptionToCloudExError(e);
                cloudService.updateMetadata(metaData);

                // wait until we get further instructions
                // now wait for any change in the metadata
                log.info("Waiting for new instructions from the Coordinator");
                metaData = cloudService.getMetaData(true);
                status = metaData.getStatus();

            } catch (Exception e1) {
                // all has failed with no hope of recovery, retry a few times then terminate
                log.fatal("An error has occurred whilst trying to recover", e1);
                // self terminate :-(
                // FIXME uncomment once testing is thoroughly done
                //this.service.shutdownInstance();
            }
        }

        if (this.stop) {
            break;
        }
    }

}
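
Note how reset() appears twice above: after a task completes it discards that task's elapsed time so the same stopwatch can time the next one, and in the catch block it clears a watch that was still running when the task threw.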

From source file:com.thinkbiganalytics.feedmgr.nifi.CreateFeedBuilder.java

/**
 * Updates a process group's properties.
 */
private void updateProcessGroupProperties(String processGroupId, String processGroupName)
        throws FeedCreationException {
    Stopwatch stopwatch = Stopwatch.createStarted();
    List<NifiProperty> propertiesToUpdate = restClient.getPropertiesForProcessGroup(processGroupId);
    stopwatch.stop();
    log.debug("Time to get Properties in Feed updateProcessGroupProperties: {} ms",
            stopwatch.elapsed(TimeUnit.MILLISECONDS));

    stopwatch.reset();
    stopwatch.start();
    //get the Root processGroup
    ProcessGroupDTO rootProcessGroup = niFiObjectCache.getRootProcessGroup();
    stopwatch.stop();
    log.debug("Time to get root Process Group in updateProcessGroupProperties: {} ms",
            stopwatch.elapsed(TimeUnit.MILLISECONDS));
    stopwatch.reset();

    stopwatch.start();
    modifiedProperties = new ArrayList<>();
    //resolve the static properties
    //first fill in any properties with static references
    List<NifiProperty> modifiedStaticProperties = propertyExpressionResolver
            .resolveStaticProperties(propertiesToUpdate);
    // now apply any of the incoming metadata properties to this

    List<NifiProperty> modifiedFeedMetadataProperties = NifiPropertyUtil.matchAndSetPropertyValues(
            rootProcessGroup.getName(), processGroupName, propertiesToUpdate, properties);
    modifiedProperties.addAll(modifiedStaticProperties);
    modifiedProperties.addAll(modifiedFeedMetadataProperties);

    stopwatch.stop();
    log.debug("Time to set modifiedProperties: {} ms", stopwatch.elapsed(TimeUnit.MILLISECONDS));
    stopwatch.reset();

    stopwatch.start();
    restClient.updateProcessGroupProperties(modifiedProperties);
    stopwatch.stop();
    log.debug("Time to update properties in the process group: {} ms",
            stopwatch.elapsed(TimeUnit.MILLISECONDS));

}
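
The stop/log/reset/start cadence above times each phase of the method with a single stopwatch. Since reset() returns the Stopwatch, the reset-then-restart pair can equivalently be collapsed into one chained call, as some of the later examples do:

stopwatch.stop();
log.debug("phase took {} ms", stopwatch.elapsed(TimeUnit.MILLISECONDS));
stopwatch.reset().start();   // same effect as calling reset() and start() separately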

From source file:org.apache.drill.exec.store.parquet.Metadata.java

/**
 * Get the parquet metadata for the parquet files in a directory
 *
 * @param path the path of the directory
 * @return the parquet table metadata for the files under the given path
 * @throws IOException
 */
private ParquetTableMetadata_v2 getParquetTableMetadata(String path) throws IOException {
    Path p = new Path(path);
    FileStatus fileStatus = fs.getFileStatus(p);
    final Stopwatch watch = Stopwatch.createStarted();
    List<FileStatus> fileStatuses = getFileStatuses(fileStatus);
    logger.info("Took {} ms to get file statuses", watch.elapsed(TimeUnit.MILLISECONDS));
    watch.reset();
    watch.start();
    ParquetTableMetadata_v2 metadata_v1 = getParquetTableMetadata(fileStatuses);
    logger.info("Took {} ms to read file metadata", watch.elapsed(TimeUnit.MILLISECONDS));
    return metadata_v1;
}

From source file:ezbake.groups.service.caching.RedisCacheLayer.java

/**
 * Run an update for the given query.
 *
 * This uses optimistic locking to ensure that the update operation is successful. It will make up to
 * OPTIMISTIC_MAX_TRIES_TO_SET attempts, and will run the query again after each failed update
 *
 * @param jedis redis client
 * @param query query to run
 * @return the collection that was returned by the query
 * @throws Exception
 */
private Set<Long> forceUpdate(Jedis jedis, Queryable<Set<Long>> query) throws Exception {
    String key = query.getKey();

    Stopwatch timer = getStopwatch();

    Set<Long> value = null;
    int tries = 0;
    while (tries < OPTIMISTIC_MAX_TRIES_TO_SET) {
        // Watch key for modifications while querying database
        jedis.watch(key);

        try {
            value = query.runQuery();
        } catch (Exception e) {
            logger.error("Cache failed to run query to populate values", e);
            jedis.unwatch();
            throw e;
        }

        Collection<String> saddAuths = query.transformToCachable(value);

        // Atomic delete and update of set type
        Transaction multi = jedis.multi();
        multi.del(key);
        multi.sadd(key, saddAuths.toArray(new String[saddAuths.size()]));
        multi.set(getSignatureKey(key), checksumCompute.getChecksumSignature(value, key));

        // If successful... return auths
        if (multi.exec() != null) {
            break;
        }
        logStopwatch(timer, "Failed Optimistic Lock. Attempt: %d", tries);
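        // note: reset() also stops the stopwatch, so a start() would be needed to time the next attempt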
        timer.reset();
        tries += 1;
    }
    return value;
}

From source file:com.google.api.control.Client.java

/**
 * Process a check request.
 *
 * The {@code req} is first passed to the {@code CheckAggregator}. If there is a valid cached
 * response, that is returned, otherwise a response is obtained from the transport.
 *
 * @param req a {@link CheckRequest}
 * @return a {@link CheckResponse} or {@code null} if none was cached and there was a transport
 *         failure
 */
public @Nullable CheckResponse check(CheckRequest req) {
    Preconditions.checkState(running, "Cannot check if it's not running");
    statistics.totalChecks.incrementAndGet();
    Stopwatch w = Stopwatch.createStarted(ticker);
    CheckResponse resp = checkAggregator.check(req);
    statistics.totalCheckCacheLookupTimeMillis.addAndGet(w.elapsed(TimeUnit.MILLISECONDS));
    if (resp != null) {
        statistics.checkHits.incrementAndGet();
        if (log.isLoggable(Level.FINER)) {
            log.log(Level.FINER, String.format("using cached check response for %s: %s", req, resp));
        }
        return resp;
    }

    // Application code should not fail (or be blocked) because check requests do not succeed.
    // Instead they should fail open, so simply log the error and return null to indicate
    // that no response was obtained.
    try {
        w.reset().start();
        resp = transport.services().check(serviceName, req).execute();
        statistics.totalCheckTransportTimeMillis.addAndGet(w.elapsed(TimeUnit.MILLISECONDS));
        checkAggregator.addResponse(req, resp);
        return resp;
    } catch (IOException e) {
        log.log(Level.SEVERE, String.format("direct send of a check request %s failed because of %s", req, e));
        return null;
    }
}

From source file:org.apache.drill.fmpp.mojo.FMPPMojo.java

@Override
public void execute() throws MojoExecutionException, MojoFailureException {
    if (project == null) {
        throw new MojoExecutionException("This plugin can only be used inside a project.");
    }
    String outputPath = output.getAbsolutePath();
    if ((!output.exists() && !output.mkdirs()) || !output.isDirectory()) {
        throw new MojoFailureException("can not write to output dir: " + outputPath);
    }
    String templatesPath = templates.getAbsolutePath();
    if (!templates.exists() || !templates.isDirectory()) {
        throw new MojoFailureException("templates not found in dir: " + outputPath);
    }

    // add the output directory path to the project source directories
    switch (scope) {
    case "compile":
        project.addCompileSourceRoot(outputPath);
        break;
    case "test":
        project.addTestCompileSourceRoot(outputPath);
        break;
    default:
        throw new MojoFailureException("scope must be compile or test");
    }

    final Stopwatch sw = Stopwatch.createStarted();
    try {
        getLog().info(format("Freemarker generation:\n scope: %s,\n config: %s,\n templates: %s", scope,
                config.getAbsolutePath(), templatesPath));
        final File tmp = Files.createTempDirectory("freemarker-tmp").toFile();
        String tmpPath = tmp.getAbsolutePath();
        final String tmpPathNormalized = tmpPath.endsWith(File.separator) ? tmpPath : tmpPath + File.separator;
        Settings settings = new Settings(new File("."));
        settings.set("sourceRoot", templatesPath);
        settings.set("outputRoot", tmp.getAbsolutePath());
        settings.load(config);
        settings.addProgressListener(new TerseConsoleProgressListener());
        settings.addProgressListener(new ProgressListener() {
            @Override
            public void notifyProgressEvent(Engine engine, int event, File src, int pMode, Throwable error,
                    Object param) throws Exception {
                if (event == EVENT_END_PROCESSING_SESSION) {
                    getLog().info(format("Freemarker generation took %dms", sw.elapsed(TimeUnit.MILLISECONDS)));
                    sw.reset().start(); // restart, otherwise the elapsed time logged below would always be zero
                    Report report = moveIfChanged(tmp, tmpPathNormalized);
                    if (!tmp.delete()) {
                        throw new MojoFailureException(format("can not delete %s", tmp));
                    }
                    getLog().info(
                            format("Incremental output update took %dms", sw.elapsed(TimeUnit.MILLISECONDS)));
                    getLog().info(format("new: %d", report.newFiles));
                    getLog().info(format("changed: %d", report.changedFiles));
                    getLog().info(format("unchanged: %d", report.unchangedFiles));
                }
            }
        });
        settings.execute();
    } catch (Exception e) {
        throw new MojoFailureException(MiscUtil.causeMessages(e), e);
    }
}

From source file:com.google.api.control.ControlFilter.java

@Override
public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain)
        throws IOException, ServletException {
    if (client == null) {
        log.log(Level.INFO, String.format("No control client was created - skipping service control"));
        chain.doFilter(request, response);
        return;
    }
    if (projectId == null) {
        log.log(Level.INFO, String.format("No project Id was specified - skipping service control"));
        chain.doFilter(request, response);
        return;
    }

    // Start tracking the latency
    LatencyTimer timer = new LatencyTimer(ticker);

    // Service Control is not required for this method, execute the rest of
    // the filter chain
    MethodRegistry.Info info = ConfigFilter.getMethodInfo(request);
    HttpServletRequest httpRequest = (HttpServletRequest) request;
    if (info == null) {
        if (log.isLoggable(Level.FINE)) {
            log.log(Level.FINE, String.format("no method corresponds to %s - skipping service control",
                    httpRequest.getRequestURI()));
        }
        chain.doFilter(request, response);
        return;
    }

    // Internal stats tracking
    Stopwatch creationTimer = Stopwatch.createUnstarted(ticker);
    Stopwatch overallTimer = Stopwatch.createStarted(ticker);

    // Perform the check
    AppStruct appInfo = new AppStruct();
    appInfo.httpMethod = httpRequest.getMethod();
    appInfo.requestSize = httpRequest.getContentLength();
    appInfo.url = httpRequest.getRequestURI();
    CheckRequestInfo checkInfo = createCheckInfo(httpRequest, appInfo.url, info);
    CheckErrorInfo errorInfo;
    CheckResponse checkResponse = null;
    if (Strings.isNullOrEmpty(checkInfo.getApiKey()) && !info.shouldAllowUnregisteredCalls()) {
        errorInfo = CheckErrorInfo.API_KEY_NOT_PROVIDED;
        if (log.isLoggable(Level.FINE)) {
            log.log(Level.FINE, String.format("no api key was provided"));
        }
    } else {
        creationTimer.reset().start();
        CheckRequest checkRequest = checkInfo.asCheckRequest(clock);
        statistics.totalChecks.incrementAndGet();
        statistics.totalCheckCreationTime.addAndGet(creationTimer.elapsed(TimeUnit.MILLISECONDS));
        if (log.isLoggable(Level.FINE)) {
            log.log(Level.FINE, String.format("checking using %s", checkRequest));
        }
        checkResponse = client.check(checkRequest);
        errorInfo = CheckErrorInfo.convert(checkResponse);
    }

    // Handle check failures. This includes check transport failures, in
    // which case the checkResponse is null.
    if (errorInfo != CheckErrorInfo.OK) {
        log.log(Level.WARNING, String.format("the check did not succeed; the response %s", checkResponse));

        // ensure the report request is created with updated api_key validity, as this determines
        // the consumer id
        checkInfo.setApiKeyValid(!errorInfo.isApiKeyError());
        appInfo.responseCode = errorInfo.getHttpCode();

        // 'Send' a report, end the latency timer to collect correct overhead and backend latencies
        timer.end();
        ReportRequest reportRequest = createReportRequest(info, checkInfo, appInfo,
                ConfigFilter.getReportRule(request), timer);
        if (log.isLoggable(Level.FINEST)) {
            log.log(Level.FINEST, String.format("sending an error report request %s", reportRequest));
        }
        client.report(reportRequest);

        if (errorInfo == CheckErrorInfo.API_KEY_NOT_PROVIDED) {
            // a needed API key was not provided
            HttpServletResponse httpResponse = (HttpServletResponse) response;
            httpResponse.sendError(errorInfo.getHttpCode(), errorInfo.getMessage());
        } else if (checkResponse == null) {
            // the check did not complete: 'fail open'
            chain.doFilter(request, response);
        } else {
            // the check failed: assume that any error information will be in the first check error
            HttpServletResponse httpResponse = (HttpServletResponse) response;
            httpResponse.sendError(errorInfo.getHttpCode(),
                    errorInfo.fullMessage(projectId, checkResponse.getCheckErrors(0).getDetail()));
        }
        statistics.totalFiltered.incrementAndGet();
        statistics.totalFilteredTime.addAndGet(overallTimer.elapsed(TimeUnit.MILLISECONDS));
        logStatistics();
        return;
    }

    // Execute the request in wrapper, capture the response, then write it to the output
    GenericResponseWrapper wrapper = new GenericResponseWrapper((HttpServletResponse) response);
    try {
        timer.appStart();
        chain.doFilter(request, wrapper);
    } finally {
        timer.end();
        ServletOutputStream out = response.getOutputStream();
        out.write(wrapper.getData());
        out.close();
    }

    // Send a report
    appInfo.responseCode = wrapper.getResponseCode();
    appInfo.responseSize = wrapper.getContentLength() != 0 ? wrapper.getContentLength()
            : wrapper.getData().length;
    creationTimer.reset().start();
    ReportRequest reportRequest = createReportRequest(info, checkInfo, appInfo,
            ConfigFilter.getReportRule(request), timer);
    statistics.totalReports.incrementAndGet();
    statistics.totalReportCreationTime.addAndGet(creationTimer.elapsed(TimeUnit.MILLISECONDS));
    if (log.isLoggable(Level.FINEST)) {
        log.log(Level.FINEST, String.format("sending a report request %s", reportRequest));
    }
    client.report(reportRequest);
    statistics.totalFiltered.incrementAndGet();
    statistics.totalFilteredTime.addAndGet(overallTimer.elapsed(TimeUnit.MILLISECONDS));
    logStatistics();
}
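
Here creationTimer is created unstarted and armed with creationTimer.reset().start() immediately before each block it measures, so a single stopwatch instance times several disjoint code paths within one request.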

From source file:org.apache.jackrabbit.oak.upgrade.RepositoryUpgrade.java

/**
 * Copies the full content from the source to the target repository.
 * <p>
 * The source repository <strong>must not be modified</strong> while
 * the copy operation is running to avoid an inconsistent copy.
 * <p>
 * Note that both the source and the target repository must be closed
 * during the copy operation as this method requires exclusive access
 * to the repositories.
 *
 * @param initializer optional extra repository initializer to use
 * @throws RepositoryException if the copy operation fails
 */
public void copy(RepositoryInitializer initializer) throws RepositoryException {
    RepositoryConfig config = source.getRepositoryConfig();
    logger.info("Copying repository content from {} to Oak", config.getHomeDir());
    try {
        NodeBuilder targetBuilder = target.getRoot().builder();
        final Root upgradeRoot = new UpgradeRoot(targetBuilder);

        String workspaceName = source.getRepositoryConfig().getDefaultWorkspaceName();
        SecurityProviderImpl security = new SecurityProviderImpl(mapSecurityConfig(config.getSecurityConfig()));

        if (skipInitialization) {
            logger.info("Skipping the repository initialization");
        } else {
            // init target repository first
            logger.info("Initializing initial repository content from {}", config.getHomeDir());
            new InitialContent().initialize(targetBuilder);
            if (initializer != null) {
                initializer.initialize(targetBuilder);
            }
            logger.debug("InitialContent completed from {}", config.getHomeDir());

            for (SecurityConfiguration sc : security.getConfigurations()) {
                RepositoryInitializer ri = sc.getRepositoryInitializer();
                ri.initialize(targetBuilder);
                logger.debug("Repository initializer '" + ri.getClass().getName() + "' completed",
                        config.getHomeDir());
            }
            for (SecurityConfiguration sc : security.getConfigurations()) {
                WorkspaceInitializer wi = sc.getWorkspaceInitializer();
                wi.initialize(targetBuilder, workspaceName);
                logger.debug("Workspace initializer '" + wi.getClass().getName() + "' completed",
                        config.getHomeDir());
            }
        }

        HashBiMap<String, String> uriToPrefix = HashBiMap.create();
        logger.info("Copying registered namespaces");
        copyNamespaces(targetBuilder, uriToPrefix);
        logger.debug("Namespace registration completed.");

        if (skipInitialization) {
            logger.info("Skipping registering node types and privileges");
        } else {
            logger.info("Copying registered node types");
            NodeTypeManager ntMgr = new ReadWriteNodeTypeManager() {
                @Override
                protected Tree getTypes() {
                    return upgradeRoot.getTree(NODE_TYPES_PATH);
                }

                @Nonnull
                @Override
                protected Root getWriteRoot() {
                    return upgradeRoot;
                }
            };
            copyNodeTypes(ntMgr, new ValueFactoryImpl(upgradeRoot, NamePathMapper.DEFAULT));
            logger.debug("Node type registration completed.");

            // migrate privileges
            logger.info("Copying registered privileges");
            PrivilegeConfiguration privilegeConfiguration = security
                    .getConfiguration(PrivilegeConfiguration.class);
            copyCustomPrivileges(
                    privilegeConfiguration.getPrivilegeManager(upgradeRoot, NamePathMapper.DEFAULT));
            logger.debug("Privilege registration completed.");

            // Triggers compilation of type information, which we need for
            // the type predicates used by the bulk copy operations below.
            new TypeEditorProvider(false).getRootEditor(targetBuilder.getBaseState(),
                    targetBuilder.getNodeState(), targetBuilder, null);
        }

        final NodeState reportingSourceRoot = ReportingNodeState.wrap(
                JackrabbitNodeState.createRootNodeState(source, workspaceName, targetBuilder.getNodeState(),
                        uriToPrefix, copyBinariesByReference, skipOnError),
                new LoggingReporter(logger, "Migrating", 10000, -1));
        final NodeState sourceRoot;
        if (skipLongNames) {
            sourceRoot = NameFilteringNodeState.wrap(reportingSourceRoot);
        } else {
            sourceRoot = reportingSourceRoot;
        }

        final Stopwatch watch = Stopwatch.createStarted();

        logger.info("Copying workspace content");
        copyWorkspace(sourceRoot, targetBuilder, workspaceName);
        targetBuilder.getNodeState(); // on TarMK this call triggers the actual copy
        logger.info("Upgrading workspace content completed in {}s ({})", watch.elapsed(TimeUnit.SECONDS),
                watch);

        if (!versionCopyConfiguration.skipOrphanedVersionsCopy()) {
            logger.info("Copying version storage");
            watch.reset().start();
            copyVersionStorage(sourceRoot, targetBuilder, versionCopyConfiguration);
            targetBuilder.getNodeState(); // on TarMK this call triggers the actual copy
            logger.info("Version storage copied in {}s ({})", watch.elapsed(TimeUnit.SECONDS), watch);
        } else {
            logger.info("Skipping the version storage as the copyOrphanedVersions is set to false");
        }

        watch.reset().start();
        logger.info("Applying default commit hooks");
        // TODO: default hooks?
        List<CommitHook> hooks = newArrayList();

        UserConfiguration userConf = security.getConfiguration(UserConfiguration.class);
        String groupsPath = userConf.getParameters().getConfigValue(UserConstants.PARAM_GROUP_PATH,
                UserConstants.DEFAULT_GROUP_PATH);

        // hooks specific to the upgrade, need to run first
        hooks.add(new EditorHook(new CompositeEditorProvider(new RestrictionEditorProvider(),
                new GroupEditorProvider(groupsPath),
                // copy referenced version histories
                new VersionableEditor.Provider(sourceRoot, workspaceName, versionCopyConfiguration),
                new SameNameSiblingsEditor.Provider())));

        // this editor works on the VersionableEditor output, so it can't be
        // a part of the same EditorHook
        hooks.add(new EditorHook(new VersionablePropertiesEditor.Provider()));

        // security-related hooks
        for (SecurityConfiguration sc : security.getConfigurations()) {
            hooks.addAll(sc.getCommitHooks(workspaceName));
        }

        if (customCommitHooks != null) {
            hooks.addAll(customCommitHooks);
        }

        markIndexesToBeRebuilt(targetBuilder);

        // type validation, reference and indexing hooks
        hooks.add(new EditorHook(
                new CompositeEditorProvider(createTypeEditorProvider(), createIndexEditorProvider())));

        target.merge(targetBuilder, new LoggingCompositeHook(hooks, source, overrideEarlyShutdown()),
                CommitInfo.EMPTY);
        logger.info("Processing commit hooks completed in {}s ({})", watch.elapsed(TimeUnit.SECONDS), watch);
        logger.debug("Repository upgrade completed.");
    } catch (Exception e) {
        throw new RepositoryException("Failed to copy content", e);
    }
}

From source file:org.caleydo.core.util.clusterer.algorithm.tree.TreeClusterer.java

@Override
protected PerspectiveInitializationData cluster() {
    int r = 0;

    Stopwatch w = new Stopwatch().start(); // deprecated constructor; newer Guava uses Stopwatch.createStarted()
    r = determineSimilarities();
    System.out.println("determine similarties: " + w);
    w.stop().reset();
    if (r < 0) {
        progress(100);
        return null;
    }

    TreeClusterConfiguration tConfig = (TreeClusterConfiguration) config.getClusterAlgorithmConfiguration();

    Node[] result;

    w.start();
    switch (tConfig.getTreeClustererAlgo()) {
    case COMPLETE_LINKAGE:
        result = pmlcluster();
        System.out.println("pmlcluster: " + w);
        break;
    case AVERAGE_LINKAGE:

        result = palcluster();
        System.out.println("palcluster: " + w);
        break;
    case SINGLE_LINKAGE:
        result = pslcluster();
        System.out.println("pslcluster: " + w);
        break;
    default:
        throw new IllegalStateException("Unkonwn cluster type: " + tConfig.getTreeClustererAlgo());
    }
    if (result == null)
        return null;
    w.reset().start();
    PerspectiveInitializationData p = convert(result);
    System.out.println("convert: " + w);
    return p;
}

From source file:graph.features.cpp.OpenCPP.java

private OpenCPPSolution<Box<T>> bestSolution(final UndirectedGraph<Box<T>> boxedGraph,
        final T startingMazeNode) {

    OpenCPPSolution<Box<T>> bestSolution = new OpenCPPSolution<Box<T>>(null, null, null, null,
            2 * this.getLowerBoundCost() * 2);

    final Stopwatch stopwatch = new Stopwatch(); // deprecated constructor; newer Guava uses Stopwatch.createUnstarted()

    final DegreeInterface<T> degreeInterface = this.getGraph().fetch(DegreeFeature.class).up();

    //final int i = 0;
    for (final T oddVertice : degreeInterface.getNodesWithOddDegree().keySet()) {
        stopwatch.start();
        final UndirectedGraph<Box<T>> virtualGraph = this.buildVirtualGraph(boxedGraph, startingMazeNode,
                oddVertice);

        //final ClosedCPP<Box<T>> cppSolver = ClosedCPP.from(virtualGraph);
        final ClosedCPPInterface<Box<T>> closedCPPInterface = virtualGraph.fetch(ClosedCPPFeature.class).up();

        final ClosedCPPSolution<Box<T>> cppSolution = closedCPPInterface.solve();
        if (cppSolution.getUpperBoundCost() < bestSolution.getUpperBoundCost()) {
            bestSolution = new OpenCPPSolution<Box<T>>(new Box<T>(oddVertice), virtualGraph,
                    cppSolution.getTraversalByEdge(), cppSolution.getLowerBoundCost(),
                    cppSolution.getUpperBoundCost());
        }
        /*
        System.out.println();
        System.out.println(++i + "/" + this.oddVertices.size() + " : " + stopwatch.elapsedTime(TimeUnit.MILLISECONDS) + " " + TimeUnit.MILLISECONDS);
        System.out.println(oddVertice + " -> " + cppSolver.getUpperBoundCost() + "$");
        System.out.println();
        */
        stopwatch.reset();
    }

    return bestSolution;

}
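
A single unstarted Stopwatch is reused across the loop: start() at the top of each pass and reset() at the bottom, so each odd-degree vertex is timed independently. reset() leaves the watch stopped, which is exactly the state the next start() call expects.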