Example usage for com.google.common.base Stopwatch stop

Introduction

On this page you can find example usages of com.google.common.base.Stopwatch.stop().

Prototype

public Stopwatch stop() 

Document

Stops the stopwatch; future reads will return the fixed duration that had elapsed up to this point.
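
A minimal, self-contained sketch of the basic pattern, assuming a recent Guava version where Stopwatch.createStarted() is available; doWork is a placeholder for the code being timed:

import com.google.common.base.Stopwatch;

import java.util.concurrent.TimeUnit;

public class StopwatchStopExample {

    public static void main(String[] args) {
        // Create a stopwatch that is already running
        Stopwatch stopwatch = Stopwatch.createStarted();

        doWork(); // the operation being measured (placeholder)

        // Freeze the elapsed duration; further reads return the time measured up to this point
        stopwatch.stop();

        System.out.println("doWork took " + stopwatch.elapsed(TimeUnit.MILLISECONDS) + " ms");
    }

    // Placeholder workload used only for illustration
    private static void doWork() {
        double sum = 0;
        for (int i = 0; i < 1_000_000; i++) {
            sum += Math.sqrt(i);
        }
    }
}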

Usage

From source file:uk.ac.open.kmi.iserve.discovery.disco.impl.SparqlLogicConceptMatcher.java

private Table<URI, URI, MatchResult> queryForMatchResults(String queryStr) {

    ImmutableTable.Builder<URI, URI, MatchResult> result = ImmutableTable.builder();

    // Query the engine
    Query query = QueryFactory.create(queryStr);
    QueryExecution qe = QueryExecutionFactory.sparqlService(this.sparqlEndpoint.toASCIIString(), query);
    MonitoredQueryExecution qexec = new MonitoredQueryExecution(qe);
    try {
        Stopwatch stopwatch = new Stopwatch().start();
        ResultSet qResults = qexec.execSelect();
        stopwatch.stop();
        log.debug("Time taken for querying the registry: {}", stopwatch);

        // Obtain matches if any and figure out the type
        MatchType type;
        URI origin;
        URI destination;
        while (qResults.hasNext()) {
            QuerySolution soln = qResults.nextSolution();
            // Only process if we can get complete match information
            if (soln.contains(ORIGIN_VAR) && soln.contains(DESTINATION_VAR)) {
                type = getMatchType(soln);
                origin = new URI(soln.getResource(ORIGIN_VAR).getURI());
                destination = new URI(soln.getResource(DESTINATION_VAR).getURI());
                log.debug("Concept {} was matched to {} with type {}", origin, destination, type);
                result.put(origin, destination, new AtomicMatchResult(origin, destination, type, this));
            }
        }

    } catch (URISyntaxException e) {
        log.error("Error creating URI for match results", e);
    } finally {
        qexec.close();
    }

    return result.build();
}
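
Note: this example uses the older new Stopwatch() constructor. In newer Guava releases the public constructors were removed in favor of Stopwatch.createStarted() and Stopwatch.createUnstarted(), as used in the later examples on this page.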

From source file:demos.AsynchronousInsert.java

@Override
public void run() {
    try {
        logger.info("Preparing to insert metric data points");

        Cluster cluster = Cluster.builder().addContactPoint("127.0.0.1").build();
        Session session = cluster.connect("demo");
        PreparedStatement insert = session
                .prepare("insert into metric_data (metric_id, time, value) values (?, ?, ?)");
        Random random = new Random();
        DateTime time = DateTime.now().minusYears(1);
        final CountDownLatch latch = new CountDownLatch(NUM_INSERTS);

        FutureCallback<ResultSet> callback = new FutureCallback<ResultSet>() {
            @Override
            public void onSuccess(ResultSet result) {
                latch.countDown();
            }

            @Override
            public void onFailure(Throwable t) {
                logger.warn("There was an error inserting data", t);
                latch.countDown();
            }
        };

        Stopwatch stopwatch = new Stopwatch().start();
        for (int i = 0; i < NUM_INSERTS; ++i) {
            String metricId = "metric-" + Math.abs(random.nextInt() % NUM_METRICS);
            double value = random.nextDouble();
            ResultSetFuture future = session.executeAsync(insert.bind(metricId, time.toDate(), value));
            time = time.plusSeconds(10);
            Futures.addCallback(future, callback);
        }
        latch.await();
        stopwatch.stop();

        logger.info("Finished inserting {} data points in {} ms", NUM_INSERTS,
                stopwatch.elapsed(TimeUnit.MILLISECONDS));
    } catch (InterruptedException e) {
        logger.info("There was an interrupt while waiting for inserts to complete");
    }
}

From source file:org.apache.hadoop.hbase.ScanPerformanceEvaluation.java

public void testScan() throws IOException {
    Stopwatch tableOpenTimer = new Stopwatch();
    Stopwatch scanOpenTimer = new Stopwatch();
    Stopwatch scanTimer = new Stopwatch();

    tableOpenTimer.start();
    HTable table = new HTable(getConf(), TableName.valueOf(tablename));
    tableOpenTimer.stop();

    Scan scan = getScan();
    scanOpenTimer.start();
    ResultScanner scanner = table.getScanner(scan);
    scanOpenTimer.stop();

    long numRows = 0;
    long numCells = 0;
    scanTimer.start();
    while (true) {
        Result result = scanner.next();
        if (result == null) {
            break;
        }
        numRows++;

        numCells += result.rawCells().length;
    }
    scanTimer.stop();
    scanner.close();
    table.close();

    ScanMetrics metrics = ProtobufUtil.toScanMetrics(scan.getAttribute(Scan.SCAN_ATTRIBUTES_METRICS_DATA));
    long totalBytes = metrics.countOfBytesInResults.get();
    double throughput = (double) totalBytes / scanTimer.elapsedTime(TimeUnit.SECONDS);
    double throughputRows = (double) numRows / scanTimer.elapsedTime(TimeUnit.SECONDS);
    double throughputCells = (double) numCells / scanTimer.elapsedTime(TimeUnit.SECONDS);

    System.out.println("HBase scan: ");
    System.out.println("total time to open table: " + tableOpenTimer.elapsedMillis() + " ms");
    System.out.println("total time to open scanner: " + scanOpenTimer.elapsedMillis() + " ms");
    System.out.println("total time to scan: " + scanTimer.elapsedMillis() + " ms");

    System.out.println("Scan metrics:\n" + metrics.getMetricsMap());

    System.out.println(
            "total bytes: " + totalBytes + " bytes (" + StringUtils.humanReadableInt(totalBytes) + ")");
    System.out.println("throughput  : " + StringUtils.humanReadableInt((long) throughput) + "B/s");
    System.out.println("total rows  : " + numRows);
    System.out.println("throughput  : " + StringUtils.humanReadableInt((long) throughputRows) + " rows/s");
    System.out.println("total cells : " + numCells);
    System.out.println("throughput  : " + StringUtils.humanReadableInt((long) throughputCells) + " cells/s");
}
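
Note: this example targets an older Guava API; elapsedTime(TimeUnit) and elapsedMillis() correspond to elapsed(TimeUnit) and elapsed(TimeUnit.MILLISECONDS) in newer releases.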

From source file:org.cinchapi.concourse.importer.cli.AbstractImportCli.java

@Override
protected final void doTask() {
    ExecutorService executor = Executors.newFixedThreadPool(((ImportOptions) options).numThreads);
    String data = ((ImportOptions) options).data;
    List<String> files = scan(Paths.get(data));
    Stopwatch watch = Stopwatch.createStarted();
    for (final String file : files) {
        executor.execute(new Runnable() {

            @Override
            public void run() {
                doImport(file);
            }

        });
    }
    executor.shutdown();
    while (!executor.isTerminated()) {
        continue; // block until all tasks are completed
    }
    watch.stop();
    TimeUnit unit = TimeUnit.MILLISECONDS;
    System.out.println(MessageFormat.format("Finished import in {0} {1}", watch.elapsed(unit), unit));
}

From source file:com.vmware.photon.controller.apife.backends.DiskSqlBackend.java

/**
 * Create disk in DB using disk spec.
 */
private BaseDiskEntity create(String projectId, DiskCreateSpec spec) throws ExternalException {
    Stopwatch createWatch = Stopwatch.createStarted();

    ProjectEntity project = projectBackend.findById(projectId);

    String kind = spec.getKind();

    // the flavor needs to be queried before the locality affinity entity is saved; otherwise,
    // Hibernate will throw an exception because the disk entity is not saved yet
    FlavorEntity flavorEntity = flavorBackend.getEntityByNameAndKind(spec.getFlavor(), kind);
    BaseDiskEntity disk;

    switch (kind) {
    case PersistentDisk.KIND:
        PersistentDiskEntity persistentDisk = new PersistentDiskEntity();
        persistentDisk.setAffinities(localityBackend.create(persistentDisk, spec.getAffinities()));
        disk = persistentDisk;
        break;
    case EphemeralDisk.KIND:
        disk = new EphemeralDiskEntity();
        break;
    default:
        throw new IllegalArgumentException("Unknown disk kind: " + kind);
    }

    disk.setState(DiskState.CREATING);
    disk.setName(spec.getName());
    disk.setCapacityGb(spec.getCapacityGb());
    disk.setFlavorId(flavorEntity.getId());

    List<QuotaLineItemEntity> enhancedCost = new ArrayList<>(flavorEntity.getCost());
    String capacityKey = kind + ".capacity";
    QuotaLineItemEntity capacity = new QuotaLineItemEntity(capacityKey, spec.getCapacityGb(), QuotaUnit.GB);
    for (QuotaLineItemEntity qli : enhancedCost) {

        // assert/crash if capacity key is present in a disk entity's static cost
        // this is computed in this code at runtime.
        if (qli.getKey().equals(capacityKey)) {
            checkState(!qli.getKey().equals(capacityKey));
        }
    }
    enhancedCost.add(capacity);
    disk.setCost(enhancedCost);
    disk.setProjectId(project.getId());

    String resourceTickedId = project.getResourceTicketId();

    Stopwatch resourceTicketWatch = Stopwatch.createStarted();
    resourceTicketBackend.consumeQuota(resourceTickedId, new QuotaCost(disk.getCost()));
    resourceTicketWatch.stop();
    logger.info(
            "DiskSqlBackend.create for Disk Id: {}, Kind:{}, resourceTicket {}, consumeQuota in {} milliseconds",
            disk.getId(), kind, resourceTickedId, resourceTicketWatch.elapsed(TimeUnit.MILLISECONDS));

    BaseDiskDao<BaseDiskEntity> diskDao = getDiskDao(spec.getKind());

    createWatch.stop();
    logger.info("DiskSqlBackend.create for Disk Id: {}, Kind:{} took {} milliseconds", disk.getId(), kind,
            createWatch.elapsed(TimeUnit.MILLISECONDS));

    return diskDao.create(disk);
}

From source file:com.googlesource.gerrit.plugins.auditsl4j.WaitForCondition.java

public default boolean waitFor(Supplier<Boolean> condition) {
    Stopwatch stopwatch = Stopwatch.createStarted();
    try {
        Duration maxWait = waitTimeout();
        Duration sleep = waitInterval();
        boolean conditionSucceeded = condition.get();
        while (!conditionSucceeded && stopwatch.elapsed().compareTo(maxWait) < 0) {
            try {
                Thread.sleep(sleep.toMillis());
            } catch (InterruptedException e) {
            }
            conditionSucceeded = condition.get();
        }
        return conditionSucceeded;
    } finally {
        stopwatch.stop();
    }
}

From source file:org.eclipse.viatra.modelobfuscator.application.common.ModelObfuscatorHeadless.java

/**
 * @param outputDirectory
 * @param inputs
 * @param resourceSet
 */
private void saveObfuscatedModels(File outputDirectory, Map<String, URI> inputs, ResourceSetImpl resourceSet) {
    URI outputDirUri = URI.createFileURI(outputDirectory.getPath());
    for (Entry<String, URI> entry : inputs.entrySet()) {
        URI uri = entry.getValue();
        String fileSegment = uri.lastSegment();
        URI outputUri = outputDirUri.appendSegment(fileSegment);
        Resource resource = resourceSet.getResource(uri, false);
        resource.setURI(outputUri);
        try {
            System.out.println("Saving resource: " + fileSegment);
            Stopwatch stopwatch2 = Stopwatch.createStarted();
            resource.save(null);
            stopwatch2.stop();
            String elapsedTime2 = stopwatch2.elapsed(TimeUnit.MILLISECONDS) + " ms ("
                    + stopwatch2.elapsed(TimeUnit.NANOSECONDS) + " ns)";
            System.out.println("Saved resource: " + fileSegment + " in " + elapsedTime2);
        } catch (IOException e) {
            reportError("Could not save output " + fileSegment);
        }
    }
}

From source file:org.eclipse.viatra.modelobfuscator.application.common.ModelObfuscatorHeadless.java

/**
 * @param inputs
 * @param extensionToFactoryMap
 * @return
 */
private ResourceSetImpl loadInputModels(Map<String, URI> inputs, Map<String, Object> extensionToFactoryMap) {
    ResourceSetImpl resourceSet = new ResourceSetImpl();
    for (Entry<String, URI> inputEntry : inputs.entrySet()) {
        URI uri = inputEntry.getValue();
        // XXX we only support XMI resources in this way
        if (!extensionToFactoryMap.containsKey(uri.fileExtension())) {
            extensionToFactoryMap.put(uri.fileExtension(), new EcoreResourceFactoryImpl());
        }
        System.out.println("Loading resource: " + inputEntry.getKey());
        Stopwatch stopwatch = Stopwatch.createStarted();
        resourceSet.getResource(uri, true);
        stopwatch.stop();
        String elapsedTime = stopwatch.elapsed(TimeUnit.MILLISECONDS) + " ms ("
                + stopwatch.elapsed(TimeUnit.NANOSECONDS) + " ns)";
        System.out.println("Loaded resource: " + inputEntry.getKey() + " in " + elapsedTime);
    }
    return resourceSet;
}

From source file:es.usc.citius.composit.wsc08.data.matcher.WSCMatchGraph.java

public WSCMatchGraph(HierarchicalKnowledgeBase kb) {
    this.kb = kb;
    // Build a table using the kb and using exact/plugin match.
    Stopwatch w = Stopwatch.createStarted();
    Table<Concept, Concept, Boolean> table = HashBasedTable.create();
    for (Concept source : kb.getConcepts()) {
        Set<Concept> set = new HashSet<Concept>(kb.getSuperclasses(source));
        set.add(source);
        for (Concept target : set) {
            table.put(source, target, true);
        }
    }
    this.matchGraph = new HashMatchGraph<Concept, Boolean>(new MatchTable<Concept, Boolean>(table));
    logger.debug("MatchGraph computed in {}", w.stop().toString());
}

From source file:org.locationtech.geogig.remote.BinaryPackedObjects.java

/**
 * @return the number of objects written
 */
public long write(ObjectFunnel funnel, List<ObjectId> want, List<ObjectId> have, Set<ObjectId> sent,
        Callback callback, boolean traverseCommits, Deduplicator deduplicator) throws IOException {

    for (ObjectId i : want) {
        if (!database.exists(i)) {
            throw new NoSuchElementException(format("Wanted commit: '%s' is not known", i));
        }
    }

    LOGGER.info("scanning for previsit list...");
    Stopwatch sw = Stopwatch.createStarted();
    ImmutableList<ObjectId> needsPrevisit = traverseCommits ? scanForPrevisitList(want, have, deduplicator)
            : ImmutableList.copyOf(have);
    LOGGER.info(String.format("Previsit list built in %s for %,d ids: %s. Calculating reachable content ids...",
            sw.stop(), needsPrevisit.size(), needsPrevisit));

    deduplicator.reset();

    sw.reset().start();
    ImmutableList<ObjectId> previsitResults = reachableContentIds(needsPrevisit, deduplicator);
    LOGGER.info(String.format("reachableContentIds took %s for %,d ids", sw.stop(), previsitResults.size()));

    deduplicator.reset();

    LOGGER.info("obtaining post order iterator on range...");
    sw.reset().start();

    Iterator<RevObject> objects = PostOrderIterator.range(want, new ArrayList<ObjectId>(previsitResults),
            database, traverseCommits, deduplicator);
    long objectCount = 0;
    LOGGER.info("PostOrderIterator.range took {}", sw.stop());

    try {
        LOGGER.info("writing objects to remote...");
        while (objects.hasNext()) {
            RevObject object = objects.next();
            funnel.funnel(object);
            objectCount++;
            callback.callback(Suppliers.ofInstance(object));
        }
    } catch (IOException e) {
        String causeMessage = Throwables.getRootCause(e).getMessage();
        LOGGER.info(String.format("writing of objects failed after %,d objects. Cause: '%s'", objectCount,
                causeMessage));
        throw e;
    }
    return objectCount;
}