Example usage for com.google.common.base Stopwatch elapsed

Introduction

This page collects real-world usage examples for com.google.common.base.Stopwatch.elapsed from open-source projects.

Prototype

@CheckReturnValue
public long elapsed(TimeUnit desiredUnit) 

Document

Returns the current elapsed time shown on this stopwatch, expressed in the desired time unit, with any fraction rounded down.
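
Example

Before the project examples below, here is a minimal, self-contained sketch of the call (assumes Guava 15 or later; the class name, sleep duration, and printed labels are illustrative):

import java.util.concurrent.TimeUnit;

import com.google.common.base.Stopwatch;

public class StopwatchElapsedExample {
    public static void main(String[] args) throws InterruptedException {
        // createStarted() creates the stopwatch and starts it in one call.
        Stopwatch stopwatch = Stopwatch.createStarted();

        Thread.sleep(120); // stand-in for real work

        // elapsed(TimeUnit) rounds any fraction down, so ~120 ms reads as 0 seconds.
        System.out.println("elapsed ms: " + stopwatch.elapsed(TimeUnit.MILLISECONDS));
        System.out.println("elapsed s:  " + stopwatch.elapsed(TimeUnit.SECONDS));

        // After stop(), elapsed() keeps returning the frozen reading.
        stopwatch.stop();
        System.out.println("final ms:   " + stopwatch.elapsed(TimeUnit.MILLISECONDS));
    }
}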

Usage

From source file:com.spotify.helios.agent.TaskRunner.java
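Spotify's Helios agent times a Docker image pull and reports the elapsed seconds in its log messages, whether the pull succeeds, times out, or fails outright.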

private void pullImage(final String image) throws DockerException, InterruptedException {
    listener.pulling();

    DockerTimeoutException wasTimeout = null;
    final Stopwatch pullTime = Stopwatch.createStarted();

    // Attempt to pull.  Failure, while less than ideal, is ok.
    try {
        docker.pull(image);
        listener.pulled();
        log.info("Pulled image {} in {}s", image, pullTime.elapsed(SECONDS));
    } catch (DockerTimeoutException e) {
        log.warn("Pulling image {} failed with timeout after {}s", image, pullTime.elapsed(SECONDS), e);
        listener.pullFailed();
        wasTimeout = e;
    } catch (DockerException e) {
        log.warn("Pulling image {} failed after {}s", image, pullTime.elapsed(SECONDS), e);
        listener.pullFailed();
    }

    try {
        // If we don't have the image by now, fail.
        docker.inspectImage(image);
    } catch (ImageNotFoundException e) {
        // If we get not found, see if we timed out above, since that's what we actually care
        // to know, as the pull should have fixed the not found-ness.
        if (wasTimeout != null) {
            throw new ImagePullFailedException("Failed pulling image " + image + " because of timeout",
                    wasTimeout);
        }
        throw e;
    }
}

From source file:com.microsoft.azure.servicebus.samples.prefetch.Prefetch.java
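This Azure Service Bus sample sends a batch of messages, then wraps the receive-and-complete loop in a stopwatch and returns the elapsed milliseconds, so runs with and without prefetch can be compared.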

long sendAndReceiveMessages(IMessageSender sender, IMessageReceiver receiver, int messageCount)
        throws Exception {
    // Now we can start sending messages.
    Random rnd = new Random();
    byte[] mockPayload = new byte[100]; // 100 random-byte payload

    rnd.nextBytes(mockPayload);

    System.out.printf("\nSending %d messages to the queue\n", messageCount);
    ArrayList<CompletableFuture<Void>> sendOps = new ArrayList<>();
    for (int i = 0; i < messageCount; i++) {
        IMessage message = new Message(mockPayload);
        message.setTimeToLive(Duration.ofMinutes(5));
        sendOps.add(sender.sendAsync(message));
    }
    CompletableFuture.allOf(sendOps.toArray(new CompletableFuture<?>[sendOps.size()])).join();

    System.out.printf("Send completed\n");

    // Receive the messages
    System.out.printf("Receiving messages...\n");

    // Start stopwatch
    Stopwatch stopWatch = Stopwatch.createStarted();

    IMessage receivedMessage = receiver.receive(Duration.ofSeconds(5));
    while (receivedMessage != null) {
        // here's where you'd do any work

        // complete (round trips)
        receiver.complete(receivedMessage.getLockToken());

        if (--messageCount <= 0)
            break;

        // now get the next message
        receivedMessage = receiver.receive(Duration.ofSeconds(5));
    }
    // Stop the stopwatch
    stopWatch.stop();

    System.out.printf("Receive completed\n");

    long timeTaken = stopWatch.elapsed(TimeUnit.MILLISECONDS);
    System.out.printf("Time to receive and complete all messages = %d milliseconds\n", timeTaken);

    return timeTaken;
}

From source file:com.google.gerrit.server.notedb.PrimaryStorageMigrator.java
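Gerrit's NoteDb migrator uses elapsed(NANOSECONDS) to work out how much of the overall timeout budget remains, then hands the remainder to a retryer's stop strategy so the ensureRebuilt step consumes at most half the timeout.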

private Retryer<NoteDbChangeState> ensureRebuiltRetryer(Stopwatch sw) {
    if (testEnsureRebuiltRetryer != null) {
        return testEnsureRebuiltRetryer;
    }
    // Retry the ensureRebuilt step with backoff until half the timeout has
    // expired, leaving the remaining half for the rest of the steps.
    long remainingNanos = (MILLISECONDS.toNanos(timeoutMs) / 2) - sw.elapsed(NANOSECONDS);
    remainingNanos = Math.max(remainingNanos, 0);
    return RetryerBuilder.<NoteDbChangeState>newBuilder()
            .retryIfException(e -> (e instanceof IOException) || (e instanceof OrmException))
            .withWaitStrategy(WaitStrategies.join(WaitStrategies.exponentialWait(250, MILLISECONDS),
                    WaitStrategies.randomWait(50, MILLISECONDS)))
            .withStopStrategy(StopStrategies.stopAfterDelay(remainingNanos, NANOSECONDS)).build();
}

From source file:org.cinchapi.concourse.importer.cli.AbstractImportCli.java
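Concourse's import CLI starts the stopwatch before submitting files to a thread pool, stops it once the executor has finished, and prints the total import time.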

@Override
protected final void doTask() {
    ExecutorService executor = Executors.newFixedThreadPool(((ImportOptions) options).numThreads);
    String data = ((ImportOptions) options).data;
    List<String> files = scan(Paths.get(data));
    Stopwatch watch = Stopwatch.createStarted();
    for (final String file : files) {
        executor.execute(new Runnable() {

            @Override
            public void run() {
                doImport(file);
            }

        });
    }
    executor.shutdown();
    while (!executor.isTerminated()) {
        continue; // block until all tasks are completed
    }
    watch.stop();
    TimeUnit unit = TimeUnit.MILLISECONDS;
    System.out.println(MessageFormat.format("Finished import in {0} {1}", watch.elapsed(unit), unit));
}

From source file:org.apache.drill.exec.store.parquet.AbstractParquetScanBatchCreator.java
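Drill creates the stopwatch only when trace logging is enabled and uses elapsed(TimeUnit.MICROSECONDS) to record how long reading each Parquet footer took.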

protected ScanBatch getBatch(ExecutorFragmentContext context, AbstractParquetRowGroupScan rowGroupScan,
        OperatorContext oContext) throws ExecutionSetupException {
    final ColumnExplorer columnExplorer = new ColumnExplorer(context.getOptions(), rowGroupScan.getColumns());

    if (!columnExplorer.isStarQuery()) {
        rowGroupScan = rowGroupScan.copy(columnExplorer.getTableColumns());
        rowGroupScan.setOperatorId(rowGroupScan.getOperatorId());
    }

    AbstractDrillFileSystemManager fsManager = getDrillFileSystemCreator(oContext, context.getOptions());

    // keep footers in a map to avoid re-reading them
    Map<String, ParquetMetadata> footers = new HashMap<>();
    List<RecordReader> readers = new LinkedList<>();
    List<Map<String, String>> implicitColumns = new ArrayList<>();
    Map<String, String> mapWithMaxColumns = new LinkedHashMap<>();
    for (RowGroupReadEntry rowGroup : rowGroupScan.getRowGroupReadEntries()) {
        /*
        Here we could store a map from file names to footers, to prevent re-reading the footer for each row group in a file
        TODO - to prevent reading the footer again in the parquet record reader (it is read earlier in the ParquetStorageEngine)
        we should add more information to the RowGroupInfo that will be populated upon the first read to
        provide the reader with all of the file meta-data it needs
        These fields will be added to the constructor below
        */
        try {
            Stopwatch timer = logger.isTraceEnabled() ? Stopwatch.createUnstarted() : null;
            DrillFileSystem fs = fsManager.get(rowGroupScan.getFsConf(rowGroup), rowGroup.getPath());
            if (!footers.containsKey(rowGroup.getPath())) {
                if (timer != null) {
                    timer.start();
                }

                ParquetMetadata footer = readFooter(fs.getConf(), rowGroup.getPath());
                if (timer != null) {
                    long timeToRead = timer.elapsed(TimeUnit.MICROSECONDS);
                    logger.trace("ParquetTrace,Read Footer,{},{},{},{},{},{},{}", "", rowGroup.getPath(), "", 0,
                            0, 0, timeToRead);
                }
                footers.put(rowGroup.getPath(), footer);
            }
            ParquetMetadata footer = footers.get(rowGroup.getPath());

            boolean autoCorrectCorruptDates = rowGroupScan.areCorruptDatesAutoCorrected();
            ParquetReaderUtility.DateCorruptionStatus containsCorruptDates = ParquetReaderUtility
                    .detectCorruptDates(footer, rowGroupScan.getColumns(), autoCorrectCorruptDates);
            logger.debug("Contains corrupt dates: {}", containsCorruptDates);

            if (!context.getOptions().getBoolean(ExecConstants.PARQUET_NEW_RECORD_READER)
                    && !isComplex(footer)) {
                readers.add(new ParquetRecordReader(context, rowGroup.getPath(), rowGroup.getRowGroupIndex(),
                        rowGroup.getNumRecordsToRead(), fs,
                        CodecFactory.createDirectCodecFactory(fs.getConf(),
                                new ParquetDirectByteBufferAllocator(oContext.getAllocator()), 0),
                        footer, rowGroupScan.getColumns(), containsCorruptDates));
            } else {
                readers.add(new DrillParquetReader(context, footer, rowGroup, columnExplorer.getTableColumns(),
                        fs, containsCorruptDates));
            }

            List<String> partitionValues = rowGroupScan.getPartitionValues(rowGroup);
            Map<String, String> implicitValues = columnExplorer.populateImplicitColumns(rowGroup.getPath(),
                    partitionValues, rowGroupScan.supportsFileImplicitColumns());
            implicitColumns.add(implicitValues);
            if (implicitValues.size() > mapWithMaxColumns.size()) {
                mapWithMaxColumns = implicitValues;
            }

        } catch (IOException e) {
            throw new ExecutionSetupException(e);
        }
    }

    // all readers should have the same number of implicit columns, add missing ones with value null
    Map<String, String> diff = Maps.transformValues(mapWithMaxColumns, Functions.constant((String) null));
    for (Map<String, String> map : implicitColumns) {
        map.putAll(Maps.difference(map, diff).entriesOnlyOnRight());
    }

    return new ScanBatch(context, oContext, readers, implicitColumns);
}

From source file:com.vmware.photon.controller.api.common.db.TransactionalInterceptor.java
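This Hibernate transaction interceptor records elapsed(TimeUnit.MILLISECONDS) into a metrics timer in the finally block and warns when a transaction runs longer than two seconds.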

@Override
public Object invoke(MethodInvocation invocation) throws Throwable {
    SessionFactory sessionFactory = sessionFactoryProvider.get();

    Session session;
    if (ManagedSessionContext.hasBind(sessionFactory)) {
        session = sessionFactory.getCurrentSession();
    } else {
        session = sessionFactory.openSession();
        ManagedSessionContext.bind(session);
    }

    Transaction transaction = session.getTransaction();
    if (transaction.isActive()) {
        return invocation.proceed();
    }

    Stopwatch stopwatch = Stopwatch.createUnstarted();

    try {
        logger.trace("beginning transaction: {}", transaction);
        stopwatch.start();
        transaction.begin();
        Object result = invocation.proceed();
        transaction.commit();
        stopwatch.stop();
        logger.debug("committed: {}", transaction);
        return result;
    } catch (Throwable t) {
        logger.debug("rolling back: {}", transaction, t);
        transaction.rollback();
        transactionExceptions.mark();
        throw t;
    } finally {
        final long elapsedTime = stopwatch.elapsed(TimeUnit.MILLISECONDS);
        transactions.update(elapsedTime, TimeUnit.MILLISECONDS);
        ManagedSessionContext.unbind(sessionFactory);
        if (session.isOpen()) {
            session.close();
        }
        final long transactionTimeWarningThresholdInMilliseconds = 2000L;
        if (elapsedTime > transactionTimeWarningThresholdInMilliseconds) {
            logger.warn("Transaction {} took {} milliseconds", transaction, elapsedTime);
        }
    }
}

From source file:org.apache.pulsar.functions.instance.JavaInstanceRunnable.java
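Pulsar uses the stopwatch as a retry deadline: the loop keeps retrying to open a freshly created state table every 100 milliseconds until it succeeds or elapsed(TimeUnit.MINUTES) reaches one.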

private void setupStateTable() throws Exception {
    if (null == stateStorageServiceUrl) {
        return;
    }

    String tableNs = StateUtils.getStateNamespace(instanceConfig.getFunctionDetails().getTenant(),
            instanceConfig.getFunctionDetails().getNamespace());
    String tableName = instanceConfig.getFunctionDetails().getName();

    StorageClientSettings settings = StorageClientSettings.newBuilder().serviceUri(stateStorageServiceUrl)
            .clientName("function-" + tableNs + "/" + tableName)
            // configure a maximum 2 minutes jitter backoff for accessing table service
            .backoffPolicy(Jitter.of(Type.EXPONENTIAL, 100, 2000, 60)).build();

    // we defer creation of the state table until a java instance is running here.
    createStateTable(tableNs, tableName, settings);

    log.info("Starting state table for function {}", instanceConfig.getFunctionDetails().getName());
    this.storageClient = StorageClientBuilder.newBuilder().withSettings(settings).withNamespace(tableNs)
            .build();
    // NOTE: this is a workaround until we bump bk version to 4.9.0
    // table might just be created above, so it might not be ready for serving traffic
    Stopwatch openSw = Stopwatch.createStarted();
    while (openSw.elapsed(TimeUnit.MINUTES) < 1) {
        try {
            this.stateTable = result(storageClient.openTable(tableName));
            break;
        } catch (InternalServerException ise) {
            log.warn("Encountered internal server on opening table '{}', re-attempt in 100 milliseconds : {}",
                    tableName, ise.getMessage());
            TimeUnit.MILLISECONDS.sleep(100);
        }
    }
}

From source file:org.terasology.engine.module.ModuleManagerImpl.java
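Terasology times how long it takes to save a Reflections cache file to disk and logs the elapsed milliseconds.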

private void saveReflectionsToCacheFile(Reflections reflections, String filename) {
    String version = TerasologyVersion.getInstance().getGitCommit();
    Path root = PathManager.getInstance().getHomePath().resolve("cache");
    Path path = root.resolve(filename + version + ".xml");

    try {
        Files.createDirectories(root);

        logger.info("Reading reflection content from file {}", path);
        Stopwatch sw = Stopwatch.createStarted();
        reflections.save(path.toString());
        logger.info("Reflections read in {}ms.", sw.elapsed(TimeUnit.MILLISECONDS));
    } catch (IOException e) {
        logger.warn("Could not create folder " + root, e);
    }
}

From source file:joshelser.LimitAndSumColumnFamilyIterator.java
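This Accumulo iterator times each seek, stopping the stopwatch after the aggregation step and logging the seek duration in milliseconds.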

@Override
public void seek(Range range, Collection<ByteSequence> columnFamilies, boolean inclusive) throws IOException {
    Stopwatch seekSw = Stopwatch.createStarted();
    // Make sure we invalidate our last record
    nextRecordNotFound();

    log.debug("Seeking to " + range);

    getSource().seek(range, columnFamilies, inclusive);
    currentRange = range;
    currentColumnFamilies = columnFamilies;
    currentColumnFamiliesInclusive = inclusive;
    aggregate();
    seekSw.stop();
    log.info("Seek duration: " + seekSw.elapsed(TimeUnit.MILLISECONDS));
}

From source file:annis.gui.exporter.GeneralTextExporter.java
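ANNIS uses the elapsed time of each subgraph request for adaptive batching: if a batch round-trips in under 500 milliseconds, the exporter increases the batch size for subsequent requests.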

@Override
public boolean convertText(String queryAnnisQL, int contextLeft, int contextRight, Set<String> corpora,
        List<String> keys, String argsAsString, WebResource annisResource, Writer out, EventBus eventBus) {
    try {
        // int count = service.getCount(corpusIdList, queryAnnisQL);

        if (keys == null || keys.isEmpty()) {
            // auto set
            keys = new LinkedList<>();
            keys.add("tok");
            List<AnnisAttribute> attributes = new LinkedList<>();

            for (String corpus : corpora) {
                attributes.addAll(annisResource.path("corpora").path(urlPathEscape.escape(corpus))
                        .path("annotations").queryParam("fetchvalues", "false")
                        .queryParam("onlymostfrequentvalues", "false").get(new AnnisAttributeListType()));
            }

            for (AnnisAttribute a : attributes) {
                if (a.getName() != null) {
                    String[] namespaceAndName = a.getName().split(":", 2);
                    if (namespaceAndName.length > 1) {
                        keys.add(namespaceAndName[1]);
                    } else {
                        keys.add(namespaceAndName[0]);
                    }
                }
            }
        }

        Map<String, String> args = new HashMap<>();
        for (String s : argsAsString.split("&|;")) {
            String[] splitted = s.split("=", 2);
            String key = splitted[0];
            String val = "";
            if (splitted.length > 1) {
                val = splitted[1];
            }
            args.put(key, val);
        }

        int stepSize = 10;

        // 1. Get all the matches as Salt ID
        InputStream matchStream = annisResource.path("search/find/")
                .queryParam("q", Helper.encodeJersey(queryAnnisQL))
                .queryParam("corpora", StringUtils.join(corpora, ",")).accept(MediaType.TEXT_PLAIN_TYPE)
                .get(InputStream.class);

        try (BufferedReader inReader = new BufferedReader(new InputStreamReader(matchStream, "UTF-8"))) {
            WebResource subgraphRes = annisResource.path("search/subgraph");
            MatchGroup currentMatches = new MatchGroup();
            String currentLine;
            int offset = 0;
            // 2. iterate over all matches and get the sub-graph for a group of matches
            while (!Thread.currentThread().isInterrupted() && (currentLine = inReader.readLine()) != null) {
                Match match = Match.parseFromString(currentLine);

                currentMatches.getMatches().add(match);

                if (currentMatches.getMatches().size() >= stepSize) {
                    WebResource res = subgraphRes.queryParam("left", "" + contextLeft).queryParam("right",
                            "" + contextRight);

                    if (args.containsKey("segmentation")) {
                        res = res.queryParam("segmentation", args.get("segmentation"));
                    }

                    SubgraphFilter filter = getSubgraphFilter();
                    if (filter != null) {
                        res = res.queryParam("filter", filter.name());
                    }

                    Stopwatch stopwatch = Stopwatch.createStarted();
                    SaltProject p = res.post(SaltProject.class, currentMatches);
                    stopwatch.stop();

                    // dynamically adjust the number of items to fetch if single subgraph
                    // export was fast enough
                    if (stopwatch.elapsed(TimeUnit.MILLISECONDS) < 500 && stepSize < 50) {
                        stepSize += 10;
                    }

                    convertText(LegacyGraphConverter.convertToResultSet(p), keys, args, out,
                            offset - currentMatches.getMatches().size());

                    currentMatches.getMatches().clear();

                    if (eventBus != null) {
                        eventBus.post(offset + 1);
                    }
                }
                offset++;
            } // end for each line

            if (Thread.interrupted()) {
                // return from loop and abort export
                log.info("Exporter job was interrupted");
                return false;
            }

            // query the left over matches
            if (!currentMatches.getMatches().isEmpty()) {
                WebResource res = subgraphRes.queryParam("left", "" + contextLeft).queryParam("right",
                        "" + contextRight);
                if (args.containsKey("segmentation")) {
                    res = res.queryParam("segmentation", args.get("segmentation"));
                }

                SubgraphFilter filter = getSubgraphFilter();
                if (filter != null) {
                    res = res.queryParam("filter", filter.name());
                }

                SaltProject p = res.post(SaltProject.class, currentMatches);
                convertText(LegacyGraphConverter.convertToResultSet(p), keys, args, out,
                        offset - currentMatches.getMatches().size() - 1);
            }
            offset = 0;

        }

        out.append("\n");
        out.append("\n");
        out.append("finished");

        return true;

    } catch (AnnisQLSemanticsException | AnnisQLSyntaxException | AnnisCorpusAccessException
            | RemoteException ex) {
        log.error(null, ex);
    } catch (IOException ex) {
        log.error(null, ex);
    }
    return false;
}