Example usage for com.google.common.base Stopwatch elapsed

Introduction

On this page you can find usage examples for the com.google.common.base Stopwatch elapsed method.

Prototype

@CheckReturnValue
public long elapsed(TimeUnit desiredUnit) 

Document

Returns the current elapsed time shown on this stopwatch, expressed in the desired time unit, with any fraction rounded down.
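
The snippet below is a minimal, self-contained sketch of how elapsed(TimeUnit) is typically read from a started stopwatch; the class name, the Thread.sleep used as a stand-in for real work, and the printed message are illustrative only. Real-world usages collected from open-source projects follow in the Usage section.

import com.google.common.base.Stopwatch;
import java.util.concurrent.TimeUnit;

public class StopwatchElapsedExample {
    public static void main(String[] args) throws InterruptedException {
        // Start a stopwatch and perform the work to be timed.
        Stopwatch stopwatch = Stopwatch.createStarted();
        Thread.sleep(250); // stand-in for the work being measured

        // elapsed(TimeUnit) converts the measured duration into the requested unit,
        // rounding any fraction down: ~250 ms reads as 0 when asked for SECONDS.
        long millis = stopwatch.elapsed(TimeUnit.MILLISECONDS);
        long seconds = stopwatch.elapsed(TimeUnit.SECONDS);
        System.out.println("Elapsed: " + millis + " ms (" + seconds + " s)");

        // stop() is optional; elapsed(...) can be read while running or after stopping.
        stopwatch.stop();
    }
}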

Usage

From source file:org.jboss.hal.meta.processing.MetadataProcessor.java

@SuppressWarnings("unchecked")
private void processInternal(Set<AddressTemplate> templates, boolean recursive, Progress progress,
        AsyncCallback<Void> callback) {
    // we can skip the tasks if the metadata is already in the registries
    LookupRegistryTask lookupRegistries = new LookupRegistryTask(resourceDescriptionRegistry,
            securityContextRegistry);
    if (lookupRegistries.allPresent(templates, recursive)) {
        logger.debug("All metadata have been already processed -> callback.onSuccess(null)");
        callback.onSuccess(null);

    } else {
        boolean ie = Browser.isIE();
        List<Task<LookupContext>> tasks = new ArrayList<>();
        tasks.add(lookupRegistries);
        if (!ie) {
            tasks.add(new LookupDatabaseTask(resourceDescriptionDatabase, securityContextDatabase));
        }
        tasks.add(new RrdTask(environment, dispatcher, statementContext, settings, BATCH_SIZE, RRD_DEPTH));
        tasks.add(new UpdateRegistryTask(resourceDescriptionRegistry, securityContextRegistry));
        if (!ie) {
            tasks.add(new UpdateDatabaseTask(workerChannel));
        }

        LookupContext context = new LookupContext(progress, templates, recursive);
        Stopwatch stopwatch = Stopwatch.createStarted();
        series(context, tasks).subscribe(new Outcome<LookupContext>() {
            @Override
            public void onError(LookupContext context, Throwable error) {
                logger.debug("Failed to process metadata: {}", error.getMessage());
                callback.onFailure(error);
            }

            @Override
            public void onSuccess(LookupContext context) {
                stopwatch.stop();
                logger.info("Successfully processed metadata in {} ms", stopwatch.elapsed(MILLISECONDS));
                callback.onSuccess(null);
            }
        });
    }
}

From source file:com.google.devtools.build.android.AndroidDataDeserializer.java

/**
 * Reads the serialized {@link DataKey} and {@link DataValue} to the {@link KeyValueConsumers}.
 *
 * @param inPath The path to the serialized protocol buffer.
 * @param consumers The {@link KeyValueConsumers} for the entries {@link DataKey} -&gt;
 *     {@link DataValue}.
 * @throws DeserializationException Raised for an IOException or when the inPath is not a valid
 *     proto buffer.
 */
public void read(Path inPath, KeyValueConsumers consumers) {
    Stopwatch timer = Stopwatch.createStarted();
    try (InputStream in = Files.newInputStream(inPath, StandardOpenOption.READ)) {
        FileSystem currentFileSystem = inPath.getFileSystem();
        Header header = Header.parseDelimitedFrom(in);
        if (header == null) {
            throw new DeserializationException("No Header found in " + inPath);
        }
        readEntriesSegment(consumers, in, currentFileSystem, header);
    } catch (IOException e) {
        throw new DeserializationException(e);
    } finally {
        logger.fine(String.format("Deserialized in merged in %sms", timer.elapsed(TimeUnit.MILLISECONDS)));
    }
}

From source file:appeng.core.AppEng.java

@EventHandler
private void init(final FMLInitializationEvent event) {
    final Stopwatch start = Stopwatch.createStarted();
    AELog.info("Initialization ( started )");

    if (this.exportConfig.isExportingItemNamesEnabled()) {
        final ExportProcess process = new ExportProcess(this.recipeDirectory, this.exportConfig);
        final Thread exportProcessThread = new Thread(process);

        this.startService("AE2 CSV Export", exportProcessThread);
    }

    this.registration.initialize(event, this.recipeDirectory, this.customRecipeConfig);
    IntegrationRegistry.INSTANCE.init();

    AELog.info("Initialization ( ended after " + start.elapsed(TimeUnit.MILLISECONDS) + "ms )");
}

From source file:io.druid.query.lookup.KafkaLookupExtractorFactory.java

@Override
public boolean start() {
    synchronized (started) {
        if (started.get()) {
            LOG.warn("Already started, not starting again");
            return started.get();
        }
        if (executorService.isShutdown()) {
            LOG.warn("Already shut down, not starting again");
            return false;
        }
        final Properties kafkaProperties = new Properties();
        kafkaProperties.putAll(getKafkaProperties());
        if (kafkaProperties.containsKey("group.id")) {
            throw new IAE(
                    "Cannot set kafka property [group.id]. Property is randomly generated for you. Found [%s]",
                    kafkaProperties.getProperty("group.id"));
        }
        if (kafkaProperties.containsKey("auto.offset.reset")) {
            throw new IAE(
                    "Cannot set kafka property [auto.offset.reset]. Property will be forced to [smallest]. Found [%s]",
                    kafkaProperties.getProperty("auto.offset.reset"));
        }
        Preconditions.checkNotNull(kafkaProperties.getProperty("zookeeper.connect"),
                "zookeeper.connect required property");

        kafkaProperties.setProperty("group.id", factoryId);
        final String topic = getKafkaTopic();
        LOG.debug("About to listen to topic [%s] with group.id [%s]", topic, factoryId);
        final Map<String, String> map = cacheManager.getCacheMap(factoryId);
        mapRef.set(map);
        // Enable publish-subscribe
        kafkaProperties.setProperty("auto.offset.reset", "smallest");

        final CountDownLatch startingReads = new CountDownLatch(1);

        final ListenableFuture<?> future = executorService.submit(new Runnable() {
            @Override
            public void run() {
                while (!executorService.isShutdown()) {
                    consumerConnector = buildConnector(kafkaProperties);
                    try {
                        if (executorService.isShutdown()) {
                            break;
                        }

                        final List<KafkaStream<String, String>> streams = consumerConnector
                                .createMessageStreamsByFilter(new Whitelist(Pattern.quote(topic)), 1,
                                        DEFAULT_STRING_DECODER, DEFAULT_STRING_DECODER);

                        if (streams == null || streams.isEmpty()) {
                            throw new IAE("Topic [%s] had no streams", topic);
                        }
                        if (streams.size() > 1) {
                            throw new ISE("Topic [%s] has %d streams! expected 1", topic, streams.size());
                        }
                        final KafkaStream<String, String> kafkaStream = streams.get(0);

                        startingReads.countDown();

                        for (final MessageAndMetadata<String, String> messageAndMetadata : kafkaStream) {
                            final String key = messageAndMetadata.key();
                            final String message = messageAndMetadata.message();
                            if (key == null || message == null) {
                                LOG.error("Bad key/message from topic [%s]: [%s]", topic, messageAndMetadata);
                                continue;
                            }
                            doubleEventCount.incrementAndGet();
                            map.put(key, message);
                            doubleEventCount.incrementAndGet();
                            LOG.trace("Placed key[%s] val[%s]", key, message);
                        }
                    } catch (Exception e) {
                        LOG.error(e, "Error reading stream for topic [%s]", topic);
                    } finally {
                        consumerConnector.shutdown();
                    }
                }
            }
        });
        Futures.addCallback(future, new FutureCallback<Object>() {
            @Override
            public void onSuccess(Object result) {
                LOG.debug("Success listening to [%s]", topic);
            }

            @Override
            public void onFailure(Throwable t) {
                if (t instanceof CancellationException) {
                    LOG.debug("Topic [%s] cancelled", topic);
                } else {
                    LOG.error(t, "Error in listening to [%s]", topic);
                }
            }
        }, MoreExecutors.sameThreadExecutor());
        this.future = future;
        final Stopwatch stopwatch = Stopwatch.createStarted();
        try {
            while (!startingReads.await(100, TimeUnit.MILLISECONDS) && connectTimeout > 0L) {
                // Don't return until we have actually connected
                if (future.isDone()) {
                    future.get();
                } else {
                    if (stopwatch.elapsed(TimeUnit.MILLISECONDS) > connectTimeout) {
                        throw new TimeoutException("Failed to connect to kafka in sufficient time");
                    }
                }
            }
        } catch (InterruptedException | ExecutionException | TimeoutException e) {
            executorService.shutdown();
            if (!future.isDone() && !future.cancel(false)) {
                LOG.warn("Could not cancel kafka listening thread");
            }
            LOG.error(e, "Failed to start kafka extraction factory");
            cacheManager.delete(factoryId);
            return false;
        }

        started.set(true);
        return true;
    }
}

From source file:org.apache.druid.query.lookup.KafkaLookupExtractorFactory.java

@Override
public boolean start() {
    synchronized (started) {
        if (started.get()) {
            LOG.warn("Already started, not starting again");
            return started.get();
        }
        if (executorService.isShutdown()) {
            LOG.warn("Already shut down, not starting again");
            return false;
        }
        final Properties kafkaProperties = new Properties();
        kafkaProperties.putAll(getKafkaProperties());
        if (kafkaProperties.containsKey("group.id")) {
            throw new IAE(
                    "Cannot set kafka property [group.id]. Property is randomly generated for you. Found [%s]",
                    kafkaProperties.getProperty("group.id"));
        }
        if (kafkaProperties.containsKey("auto.offset.reset")) {
            throw new IAE(
                    "Cannot set kafka property [auto.offset.reset]. Property will be forced to [smallest]. Found [%s]",
                    kafkaProperties.getProperty("auto.offset.reset"));
        }
        Preconditions.checkNotNull(kafkaProperties.getProperty("zookeeper.connect"),
                "zookeeper.connect required property");

        kafkaProperties.setProperty("group.id", factoryId);
        final String topic = getKafkaTopic();
        LOG.debug("About to listen to topic [%s] with group.id [%s]", topic, factoryId);
        cacheHandler = cacheManager.createCache();
        final Map<String, String> map = cacheHandler.getCache();
        mapRef.set(map);
        // Enable publish-subscribe
        kafkaProperties.setProperty("auto.offset.reset", "smallest");

        final CountDownLatch startingReads = new CountDownLatch(1);

        final ListenableFuture<?> future = executorService.submit(new Runnable() {
            @Override
            public void run() {
                while (!executorService.isShutdown()) {
                    consumerConnector = buildConnector(kafkaProperties);
                    try {
                        if (executorService.isShutdown()) {
                            break;
                        }

                        final List<KafkaStream<String, String>> streams = consumerConnector
                                .createMessageStreamsByFilter(new Whitelist(Pattern.quote(topic)), 1,
                                        DEFAULT_STRING_DECODER, DEFAULT_STRING_DECODER);

                        if (streams == null || streams.isEmpty()) {
                            throw new IAE("Topic [%s] had no streams", topic);
                        }
                        if (streams.size() > 1) {
                            throw new ISE("Topic [%s] has %d streams! expected 1", topic, streams.size());
                        }
                        final KafkaStream<String, String> kafkaStream = streams.get(0);

                        startingReads.countDown();

                        for (final MessageAndMetadata<String, String> messageAndMetadata : kafkaStream) {
                            final String key = messageAndMetadata.key();
                            final String message = messageAndMetadata.message();
                            if (key == null || message == null) {
                                LOG.error("Bad key/message from topic [%s]: [%s]", topic, messageAndMetadata);
                                continue;
                            }
                            doubleEventCount.incrementAndGet();
                            map.put(key, message);
                            doubleEventCount.incrementAndGet();
                            LOG.trace("Placed key[%s] val[%s]", key, message);
                        }
                    } catch (Exception e) {
                        LOG.error(e, "Error reading stream for topic [%s]", topic);
                    } finally {
                        consumerConnector.shutdown();
                    }
                }
            }
        });
        Futures.addCallback(future, new FutureCallback<Object>() {
            @Override
            public void onSuccess(Object result) {
                LOG.debug("Success listening to [%s]", topic);
            }

            @Override
            public void onFailure(Throwable t) {
                if (t instanceof CancellationException) {
                    LOG.debug("Topic [%s] cancelled", topic);
                } else {
                    LOG.error(t, "Error in listening to [%s]", topic);
                }
            }
        }, MoreExecutors.sameThreadExecutor());
        this.future = future;
        final Stopwatch stopwatch = Stopwatch.createStarted();
        try {
            while (!startingReads.await(100, TimeUnit.MILLISECONDS) && connectTimeout > 0L) {
                // Don't return until we have actually connected
                if (future.isDone()) {
                    future.get();
                } else {
                    if (stopwatch.elapsed(TimeUnit.MILLISECONDS) > connectTimeout) {
                        throw new TimeoutException("Failed to connect to kafka in sufficient time");
                    }
                }
            }
        } catch (InterruptedException | ExecutionException | TimeoutException e) {
            executorService.shutdown();
            if (!future.isDone() && !future.cancel(false)) {
                LOG.warn("Could not cancel kafka listening thread");
            }
            LOG.error(e, "Failed to start kafka extraction factory");
            cacheHandler.close();
            return false;
        }

        started.set(true);
        return true;
    }
}

From source file:tds.dll.common.diagnostic.services.impl.DiagnosticDatabaseServiceImpl.java

private DatabaseOperation writeOperation(LegacyDbNameUtility.Databases dbName) {

    Stopwatch stopwatch = Stopwatch.createStarted();
    try {
        switch (dbName) {
        case Archive:
            writeTestDao.writeArchiveDatabase();
            break;
        case Config:
            writeTestDao.writeConfigsDatabase();
            break;
        case Itembank:
            writeTestDao.writeItemBankDatabase();
            break;
        case Session:
            writeTestDao.writeSessionDatabase();
            break;
        }
    } catch (DiagnosticException diagnosticException) {
        stopwatch.stop();
        return new DatabaseOperation(Rating.FAILED, DatabaseOperationType.WRITE,
                stopwatch.elapsed(TimeUnit.MILLISECONDS), diagnosticException.getMessage());
    }
    return new DatabaseOperation(Rating.IDEAL, DatabaseOperationType.WRITE,
            stopwatch.elapsed(TimeUnit.MILLISECONDS));
}

From source file:org.glowroot.agent.central.DownstreamServiceObserver.java

@OnlyUsedByTests
void close() throws InterruptedException {
    StreamObserver<AgentResponse> responseObserver = currResponseObserver;
    while (responseObserver == null) {
        MILLISECONDS.sleep(10);
        responseObserver = currResponseObserver;
    }
    responseObserver.onCompleted();
    Stopwatch stopwatch = Stopwatch.createStarted();
    while (stopwatch.elapsed(SECONDS) < 10 && !closedByCentralCollector) {
        MILLISECONDS.sleep(10);
    }
    checkState(closedByCentralCollector);
}

From source file:edu.mit.streamjit.impl.distributed.runtimer.OnlineTuner.java

@Override
public void run() {
    int tryCount = 0;
    try {
        tuner.startTuner(String.format("lib%sopentuner%sstreamjit%sstreamjit2.py", File.separator,
                File.separator, File.separator));

        tuner.writeLine("program");
        tuner.writeLine(app.name);

        tuner.writeLine("confg");
        String s = getConfigurationString(app.blobConfiguration);
        tuner.writeLine(s);

        System.out.println("New tune run.............");
        while (manager.getStatus() != AppStatus.STOPPED) {
            String pythonDict = tuner.readLine();
            if (pythonDict == null)
                break;

            // At the end of the tuning, Opentuner will send "Completed"
            // msg. This means no more tuning.
            if (pythonDict.equals("Completed")) {
                handleTermination();
                break;
            }

            System.out.println("----------------------------------------------");
            System.out.println(tryCount++);
            Configuration config = rebuildConfiguration(pythonDict, app.blobConfiguration);

            if (GlobalConstants.saveAllConfigurations)
                saveConfg(config, tryCount);

            try {
                if (!cfgManager.newConfiguration(config)) {
                    tuner.writeLine("-1");
                    continue;
                }

                if (manager.isRunning()) {
                    boolean state = drainer.startDraining(0);
                    if (!state) {
                        System.err.println("Final drain has already been called. no more tuning.");
                        tuner.writeLine("exit");
                        break;
                    }

                    System.err.println("awaitDrainedIntrmdiate");
                    drainer.awaitDrainedIntrmdiate();

                    if (GlobalConstants.useDrainData) {
                        System.err.println("awaitDrainData...");
                        drainer.awaitDrainData();
                        DrainData drainData = drainer.getDrainData();
                        app.drainData = drainData;
                    }
                }

                drainer.setBlobGraph(app.blobGraph);
                System.err.println("Reconfiguring...");
                if (manager.reconfigure()) {
                    Stopwatch stopwatch = Stopwatch.createStarted();
                    manager.awaitForFixInput();
                    stopwatch.stop();
                    // TODO: need to check the manager's status before
                    // passing
                    // the time. Exceptions, final drain, etc may causes app
                    // to
                    // stop executing.
                    long time = stopwatch.elapsed(TimeUnit.MILLISECONDS);

                    System.out.println("Execution time is " + time + " milli seconds");
                    tuner.writeLine(new Double(time).toString());
                } else {
                    tuner.writeLine("-1");
                    continue;
                }
            } catch (Exception ex) {
                System.err.println("Couldn't compile the stream graph with this configuration");
                tuner.writeLine("-1");
            }
        }

    } catch (IOException e) {
        e.printStackTrace();
    }

    try {
        drainer.dumpDraindataStatistics();
    } catch (IOException e) {
        e.printStackTrace();
    }
}

From source file:com.vmware.photon.controller.apife.backends.DiskSqlBackend.java

/**
 * Create disk in DB using disk spec.
 */
private BaseDiskEntity create(String projectId, DiskCreateSpec spec) throws ExternalException {
    Stopwatch createWatch = Stopwatch.createStarted();

    ProjectEntity project = projectBackend.findById(projectId);

    String kind = spec.getKind();

    // flavor need to be queried before locality affinity entity is saved, otherwise,
    // hibernate will throw exception for disk entity is not saved yet
    FlavorEntity flavorEntity = flavorBackend.getEntityByNameAndKind(spec.getFlavor(), kind);
    BaseDiskEntity disk;

    switch (kind) {
    case PersistentDisk.KIND:
        PersistentDiskEntity persistentDisk = new PersistentDiskEntity();
        persistentDisk.setAffinities(localityBackend.create(persistentDisk, spec.getAffinities()));
        disk = persistentDisk;
        break;
    case EphemeralDisk.KIND:
        disk = new EphemeralDiskEntity();
        break;
    default:
        throw new IllegalArgumentException("Unknown disk kind: " + kind);
    }

    disk.setState(DiskState.CREATING);
    disk.setName(spec.getName());
    disk.setCapacityGb(spec.getCapacityGb());
    disk.setFlavorId(flavorEntity.getId());

    List<QuotaLineItemEntity> enhancedCost = new ArrayList<>(flavorEntity.getCost());
    String capacityKey = kind + ".capacity";
    QuotaLineItemEntity capacity = new QuotaLineItemEntity(capacityKey, spec.getCapacityGb(), QuotaUnit.GB);
    for (QuotaLineItemEntity qli : enhancedCost) {

        // assert/crash if capacity key is present in a disk entity's static cost
        // this is computed in this code at runtime.
        if (qli.getKey().equals(capacityKey)) {
            checkState(!qli.getKey().equals(capacityKey));
        }
    }
    enhancedCost.add(capacity);
    disk.setCost(enhancedCost);
    disk.setProjectId(project.getId());

    String resourceTickedId = project.getResourceTicketId();

    Stopwatch resourceTicketWatch = Stopwatch.createStarted();
    resourceTicketBackend.consumeQuota(resourceTickedId, new QuotaCost(disk.getCost()));
    resourceTicketWatch.stop();
    logger.info(
            "DiskSqlBackend.create for Disk Id: {}, Kind:{}, resourceTicket {}, consumeQuota in {} milliseconds",
            disk.getId(), kind, resourceTickedId, resourceTicketWatch.elapsed(TimeUnit.MILLISECONDS));

    BaseDiskDao<BaseDiskEntity> diskDao = getDiskDao(spec.getKind());

    createWatch.stop();
    logger.info("DiskSqlBackend.create for Disk Id: {}, Kind:{} took {} milliseconds", disk.getId(), kind,
            createWatch.elapsed(TimeUnit.MILLISECONDS));

    return diskDao.create(disk);
}

From source file:com.google.gerrit.server.notedb.PrimaryStorageMigrator.java

public void migrateToReviewDbPrimary(Change.Id id, @Nullable Project.NameKey project)
        throws OrmException, IOException {
    // Migrating back to ReviewDb primary is much simpler than the original migration to NoteDb
    // primary, because when NoteDb is primary, each write only goes to one storage location rather
    // than both. We only need to consider whether a concurrent writer (OR) conflicts with the first
    // setReadOnlyInNoteDb step (MR) in this method.
    //
    // If OR wins, then either:
    // * MR will set read-only after OR is completed, which is not a concurrent write.
    // * MR will fail to set read-only with a lock failure. The caller will have to retry, but the
    //   change is not in a read-only state, so behavior is not degraded in the meantime.
    //
    // If MR wins, then either:
    // * OR will fail with a read-only exception (via AbstractChangeNotes#apply).
    // * OR will fail with a lock failure.
    //
    // In all of these scenarios, the change is read-only if and only if MR succeeds.
    //
    // There will be no concurrent writes to ReviewDb for this change until
    // setPrimaryStorageReviewDb completes, because ReviewDb writes are not attempted when primary
    // storage is NoteDb. After the primary storage changes back, it is possible for subsequent
    // NoteDb writes to conflict with the releaseReadOnlyLeaseInNoteDb step, but at this point,
    // since ReviewDb is primary, we are back to ignoring them.
    Stopwatch sw = Stopwatch.createStarted();
    if (project == null) {
        project = getProject(id);
    }
    ObjectId newMetaId = setReadOnlyInNoteDb(project, id);
    rebuilder.rebuildReviewDb(db(), project, id);
    setPrimaryStorageReviewDb(id, newMetaId);
    releaseReadOnlyLeaseInNoteDb(project, id);
    log.info("Migrated change {} to ReviewDb primary in {}ms", id, sw.elapsed(MILLISECONDS));
}