Example usage for com.google.common.base Stopwatch stop

List of usage examples for com.google.common.base Stopwatch stop

Introduction

On this page you can find usage examples for com.google.common.base Stopwatch stop().

Prototype

public Stopwatch stop() 

Document

Stops the stopwatch. Future reads return the fixed duration that had elapsed up to this point, and the method returns this Stopwatch instance, which is why several of the examples below format the result of stop() directly into a log or error message.
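
Before the real-world examples below, here is a minimal, self-contained sketch of the create/stop/read cycle. The Stopwatch calls are standard Guava API; the class name, the sleep, and the printed strings are purely illustrative.

import java.util.concurrent.TimeUnit;

import com.google.common.base.Stopwatch;

public class StopwatchStopExample {
    public static void main(String[] args) throws InterruptedException {
        Stopwatch watch = Stopwatch.createStarted(); // starts timing immediately
        Thread.sleep(250);                           // stand-in for the work being measured
        watch.stop();                                // freezes the elapsed duration

        // toString() renders a human-readable duration, e.g. "252.3 ms"
        System.out.println("Work took " + watch);
        // elapsed(...) reads the frozen duration in the requested unit
        System.out.println("Millis: " + watch.elapsed(TimeUnit.MILLISECONDS));
    }
}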

Usage

From source file:org.janusgraph.graphdb.database.idassigner.StandardIDPool.java

private synchronized void waitForIDBlockGetter() throws InterruptedException {
    Stopwatch sw = Stopwatch.createStarted();
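    // Stopwatch.stop() returns the stopwatch itself, so sw.stop() below can be formatted straight into the error messages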
    if (null != idBlockFuture) {
        try {
            nextBlock = idBlockFuture.get(renewTimeout.toMillis(), TimeUnit.MILLISECONDS);
        } catch (ExecutionException e) {
            String msg = String.format(
                    "ID block allocation on partition(%d)-namespace(%d) failed with an exception in %s",
                    partition, idNamespace, sw.stop());
            throw new JanusGraphException(msg, e);
        } catch (TimeoutException e) {
            String msg = String.format("ID block allocation on partition(%d)-namespace(%d) timed out in %s",
                    partition, idNamespace, sw.stop());
            // Attempt to cancel the renewer
            idBlockGetter.stopRequested();
            if (idAuthority.supportsInterruption()) {
                idBlockFuture.cancel(true);
            } else {
                // Attempt to clean one dead element out of closeBlockers every time we append to it
                if (!closeBlockers.isEmpty()) {
                    Future<?> f = closeBlockers.peek();
                    if (null != f && f.isDone())
                        closeBlockers.remove();
                }
                closeBlockers.add(idBlockFuture);
            }
            throw new JanusGraphException(msg, e);
        } catch (CancellationException e) {
            String msg = String.format(
                    "ID block allocation on partition(%d)-namespace(%d) was cancelled after %s", partition,
                    idNamespace, sw.stop());
            throw new JanusGraphException(msg, e);
        } finally {
            idBlockFuture = null;
        }
        // Allow InterruptedException to propagate up the stack
    }
}

From source file:eugene.simulation.agent.impl.StartAgentsBehaviour.java

@Override
public void action() {
    final Set<String> started = new HashSet<String>();
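    // Note: this uses the old public Stopwatch constructor; later Guava versions deprecate it in favor of Stopwatch.createStarted()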
    final Stopwatch stopwatch = new Stopwatch().start();
    try {
        int i = 0;
        for (final Agent a : agents) {
            final Simulation simulation = new SimulationImpl(myAgent.getAID(), marketAgent.getObject(), symbol);
            a.setArguments(new Simulation[] { simulation });
            final AgentContainer container = myAgent.getContainerController();
            final AgentController controller = container.acceptNewAgent(a.getClass().getSimpleName() + i++, a);
            controller.start();
            started.add(controller.getName());
        }

        LOG.info("Starting agents took {}", stopwatch.stop());
        result.success(started);
    } catch (StaleProxyException e) {
        LOG.error(ERROR_MSG, e);
        result.fail(started);
    }
}

From source file:org.locationtech.geogig.api.RevTreeBuilder.java

/**
 * Splits the cached entries into subtrees and saves them, making sure the tree contains either
 * only entries or subtrees.
 */
private RevTree normalize() {
    Stopwatch sw = Stopwatch.createStarted();
    RevTree unnamedTree;

    final int numPendingChanges = numPendingChanges();
    if (bucketTreesByBucket.isEmpty() && numPendingChanges <= NORMALIZED_SIZE_LIMIT) {
        unnamedTree = normalizeToChildren();
    } else {
        unnamedTree = normalizeToBuckets();
        checkState(featureChanges.isEmpty());
        checkState(treeChanges.isEmpty());

        if (unnamedTree.size() <= NORMALIZED_SIZE_LIMIT) {
            this.bucketTreesByBucket.clear();
            if (unnamedTree.buckets().isPresent()) {
                unnamedTree = moveBucketsToChildren(unnamedTree);
            }
            if (this.depth == 0) {
                pendingWritesCache.clear();
            }
        }
    }

    final int pendingWritesThreshold = 10 * 1000;
    final boolean topLevelTree = this.depth == 0;// am I an actual (addressable) tree or bucket
                                                 // tree of a higher level one?
    final boolean forceWrite = pendingWritesCache.size() >= pendingWritesThreshold;
    if (!pendingWritesCache.isEmpty() && (topLevelTree || forceWrite)) {
        LOGGER.debug("calling db.putAll for {} buckets because {}...", pendingWritesCache.size(),
                (topLevelTree ? "writing top level tree"
                        : "there are " + pendingWritesCache.size() + " pending bucket writes"));
        Stopwatch sw2 = Stopwatch.createStarted();
        db.putAll(pendingWritesCache.values().iterator());
        pendingWritesCache.clear();
        LOGGER.debug("done in {}", sw2.stop());
    }
    this.initialSize = unnamedTree.size();
    this.initialNumTrees = unnamedTree.numTrees();
    if (this.depth == 0) {
        LOGGER.debug("Normalization took {}. Changes: {}", sw.stop(), numPendingChanges);
    }
    return unnamedTree;
}

From source file:org.lenskit.cli.commands.Recommend.java

@Override
public void execute(Namespace opts) throws IOException, RecommenderBuildException {
    Context ctx = new Context(opts);
    LenskitRecommenderEngine engine = ctx.loader.loadEngine();

    List<Long> users = ctx.options.get("users");
    final int n = ctx.options.getInt("num_recs");

    try (LenskitRecommender rec = engine.createRecommender()) {
        ItemRecommender irec = rec.getItemRecommender();
        ItemNameDAO indao = rec.get(ItemNameDAO.class);
        if (irec == null) {
            logger.error("recommender has no item recommender");
            throw new UnsupportedOperationException("no item recommender");
        }

        logger.info("recommending for {} users", users.size());
        Stopwatch timer = Stopwatch.createStarted();
        for (long user : users) {
            ResultList recs = irec.recommendWithDetails(user, n, null, null);
            System.out.format("recommendations for user %d:%n", user);
            for (Result item : recs) {
                System.out.format("  %d", item.getId());
                if (indao != null) {
                    System.out.format(" (%s)", indao.getItemName(item.getId()));
                }
                System.out.format(": %.3f", item.getScore());
                System.out.println();
            }
        }
        timer.stop();
        logger.info("recommended for {} users in {}", users.size(), timer);
    }
}

From source file:org.opendaylight.controller.cluster.raft.RaftActorRecoverySupport.java

private void onRecoveredSnapshot(SnapshotOffer offer) {
    if (log.isDebugEnabled()) {
        log.debug("{}: SnapshotOffer called..", context.getId());
    }

    initRecoveryTimer();

    Snapshot snapshot = (Snapshot) offer.snapshot();

    for (ReplicatedLogEntry entry : snapshot.getUnAppliedEntries()) {
        if (isMigratedPayload(entry)) {
            hasMigratedDataRecovered = true;
        }
    }

    if (!context.getPersistenceProvider().isRecoveryApplicable()) {
        // We may have just transitioned to disabled and have a snapshot containing state data and/or log
        // entries - we don't want to preserve these, only the server config and election term info.

        snapshot = Snapshot.create(new byte[0], Collections.emptyList(), -1, -1, -1, -1,
                snapshot.getElectionTerm(), snapshot.getElectionVotedFor(), snapshot.getServerConfiguration());
    }

    // Create a replicated log with the snapshot information
    // The replicated log can be used later on to retrieve this snapshot
    // when we need to install it on a peer

    context.setReplicatedLog(ReplicatedLogImpl.newInstance(snapshot, context));
    context.setLastApplied(snapshot.getLastAppliedIndex());
    context.setCommitIndex(snapshot.getLastAppliedIndex());
    context.getTermInformation().update(snapshot.getElectionTerm(), snapshot.getElectionVotedFor());

    Stopwatch timer = Stopwatch.createStarted();

    // Apply the snapshot to the actors state
    cohort.applyRecoverySnapshot(snapshot.getState());

    if (snapshot.getServerConfiguration() != null) {
        context.updatePeerIds(snapshot.getServerConfiguration());

        if (isMigratedSerializable(snapshot.getServerConfiguration())) {
            hasMigratedDataRecovered = true;
        }
    }

    timer.stop();
    log.info("Recovery snapshot applied for {} in {}: snapshotIndex={}, snapshotTerm={}, journal-size={}",
            context.getId(), timer.toString(), replicatedLog().getSnapshotIndex(),
            replicatedLog().getSnapshotTerm(), replicatedLog().size());
}

From source file:org.apache.distributedlog.lock.ZKDistributedLock.java

/**
 * Asynchronously acquire the lock. Technically the try phase of this operation--which adds us to the waiter
 * list--is executed synchronously, but the lock wait itself doesn't block.
 */
@Override
public synchronized CompletableFuture<ZKDistributedLock> asyncAcquire() {
    if (null != lockAcquireFuture) {
        return FutureUtils
                .exception(new UnexpectedException("Someone is already acquiring/acquired lock " + lockPath));
    }
    final CompletableFuture<ZKDistributedLock> promise = FutureUtils.createFuture();
    promise.whenComplete((zkDistributedLock, throwable) -> {
        if (null == throwable || !(throwable instanceof CancellationException)) {
            return;
        }
        lockStateExecutor.executeOrdered(lockPath, () -> asyncClose());
    });
    final Stopwatch stopwatch = Stopwatch.createStarted();
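    // stop() returns the Stopwatch, so the elapsed microseconds can be read in one chained call in the listener below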
    promise.whenComplete(new FutureEventListener<ZKDistributedLock>() {
        @Override
        public void onSuccess(ZKDistributedLock lock) {
            acquireStats.registerSuccessfulEvent(stopwatch.stop().elapsed(TimeUnit.MICROSECONDS),
                    TimeUnit.MICROSECONDS);
        }

        @Override
        public void onFailure(Throwable cause) {
            acquireStats.registerFailedEvent(stopwatch.stop().elapsed(TimeUnit.MICROSECONDS),
                    TimeUnit.MICROSECONDS);
            // release the lock if fail to acquire
            asyncClose();
        }
    });
    this.lockAcquireFuture = promise;
    lockStateExecutor.executeOrdered(lockPath, () -> doAsyncAcquireWithSemaphore(promise, lockTimeout));
    return promise;
}

From source file:co.rsk.bitcoinj.params.AbstractBitcoinNetParams.java

@Override
public void checkDifficultyTransitions(final StoredBlock storedPrev, final BtcBlock nextBlock,
        final BtcBlockStore blockStore) throws VerificationException, BlockStoreException {
    BtcBlock prev = storedPrev.getHeader();

    // Is this supposed to be a difficulty transition point?
    if (!isDifficultyTransitionPoint(storedPrev)) {

        // No ... so check the difficulty didn't actually change.
        if (nextBlock.getDifficultyTarget() != prev.getDifficultyTarget())
            throw new VerificationException("Unexpected change in difficulty at height "
                    + storedPrev.getHeight() + ": " + Long.toHexString(nextBlock.getDifficultyTarget()) + " vs "
                    + Long.toHexString(prev.getDifficultyTarget()));
        return;
    }

    // We need to find a block far back in the chain. It's OK that this is expensive because it only occurs every
    // two weeks after the initial block chain download.
    final Stopwatch watch = Stopwatch.createStarted();
    StoredBlock cursor = blockStore.get(prev.getHash());
    for (int i = 0; i < this.getInterval() - 1; i++) {
        if (cursor == null) {
            // This should never happen. If it does, it means we are following an incorrect or busted chain.
            throw new VerificationException(
                    "Difficulty transition point but we did not find a way back to the genesis block.");
        }
        cursor = blockStore.get(cursor.getHeader().getPrevBlockHash());
    }
    watch.stop();
    if (watch.elapsed(TimeUnit.MILLISECONDS) > 50)
        log.info("Difficulty transition traversal took {}", watch);

    BtcBlock blockIntervalAgo = cursor.getHeader();
    int timespan = (int) (prev.getTimeSeconds() - blockIntervalAgo.getTimeSeconds());
    // Limit the adjustment step.
    final int targetTimespan = this.getTargetTimespan();
    if (timespan < targetTimespan / 4)
        timespan = targetTimespan / 4;
    if (timespan > targetTimespan * 4)
        timespan = targetTimespan * 4;

    BigInteger newTarget = Utils.decodeCompactBits(prev.getDifficultyTarget());
    newTarget = newTarget.multiply(BigInteger.valueOf(timespan));
    newTarget = newTarget.divide(BigInteger.valueOf(targetTimespan));

    if (newTarget.compareTo(this.getMaxTarget()) > 0) {
        log.info("Difficulty hit proof of work limit: {}", newTarget.toString(16));
        newTarget = this.getMaxTarget();
    }

    int accuracyBytes = (int) (nextBlock.getDifficultyTarget() >>> 24) - 3;
    long receivedTargetCompact = nextBlock.getDifficultyTarget();

    // The calculated difficulty is to a higher precision than received, so reduce here.
    BigInteger mask = BigInteger.valueOf(0xFFFFFFL).shiftLeft(accuracyBytes * 8);
    newTarget = newTarget.and(mask);
    long newTargetCompact = Utils.encodeCompactBits(newTarget);

    if (newTargetCompact != receivedTargetCompact)
        throw new VerificationException("Network provided difficulty bits do not match what was calculated: "
                + Long.toHexString(newTargetCompact) + " vs " + Long.toHexString(receivedTargetCompact));
}

From source file:brooklyn.location.softlayer.bms.SoftlayerBmsLocation.java

protected void waitForActive(final String hardwareId, long deadline) {
    Callable<Boolean> checker = new Callable<Boolean>() {
        public Boolean call() {
            String status = softLayerBmsClient.getHardwareStatus(hardwareId);
            return status.equals("ACTIVE");
        }
    };

    Stopwatch stopwatch = Stopwatch.createStarted();
    boolean reachable = new Repeater().every(1, SECONDS).until(checker).limitTimeTo(deadline, HOURS).run();
    if (!reachable) {
        throw new IllegalStateException("Bare metal server " + hardwareId + " failed to become ACTIVE in "
                + Time.makeTimeStringRounded(stopwatch));
    }

    LOG.debug("Bare metal server with hardwareId {}: is ACTIVE after {}",
            new Object[] { hardwareId, Time.makeTimeStringRounded(stopwatch) });
    stopwatch.stop();
}

From source file:de.schildbach.wallet.data.DynamicFeeLoader.java

private static void fetchDynamicFees(final HttpUrl url, final File tempFile, final File targetFile,
        final String userAgent) {
    final Stopwatch watch = Stopwatch.createStarted();

    final Request.Builder request = new Request.Builder();
    request.url(url);
    request.header("User-Agent", userAgent);
    if (targetFile.exists())
        request.header("If-Modified-Since", HttpDate.format(new Date(targetFile.lastModified())));

    final OkHttpClient httpClient = Constants.HTTP_CLIENT.clone();
    httpClient.setConnectTimeout(5, TimeUnit.SECONDS);
    httpClient.setWriteTimeout(5, TimeUnit.SECONDS);
    httpClient.setReadTimeout(5, TimeUnit.SECONDS);
    final Call call = httpClient.newCall(request.build());
    try {
        final Response response = call.execute();
        final int status = response.code();
        if (status == HttpURLConnection.HTTP_NOT_MODIFIED) {
            log.info("Dynamic fees not modified at {}, took {}", url, watch);
        } else if (status == HttpURLConnection.HTTP_OK) {
            final ResponseBody body = response.body();
            final FileOutputStream os = new FileOutputStream(tempFile);
            Io.copy(body.byteStream(), os);
            os.close();
            final Date lastModified = response.headers().getDate("Last-Modified");
            if (lastModified != null)
                tempFile.setLastModified(lastModified.getTime());
            body.close();
            if (!tempFile.renameTo(targetFile))
                throw new IllegalStateException("Cannot rename " + tempFile + " to " + targetFile);
            watch.stop();
            log.info("Dynamic fees fetched from {}, took {}", url, watch);
        } else {
            log.warn("HTTP status {} when fetching dynamic fees from {}", response.code(), url);
        }
    } catch (final Exception x) {
        log.warn("Problem when fetching dynamic fees rates from " + url, x);
    }
}

From source file:ddf.catalog.resource.download.ReliableResourceDownloadManager.java

/**
 * @param resourceRequest
 *            the original @ResourceRequest to retrieve the resource
 * @param metacard
 *            the @Metacard associated with the resource being downloaded
 * @param retriever
 *            the @ResourceRetriever to be used to get the resource
 * @return the modified @ResourceResponse with the @ReliableResourceInputStream that the client
 *         should read from
 * @throws DownloadException
 */
public ResourceResponse download(ResourceRequest resourceRequest, Metacard metacard,
        ResourceRetriever retriever) throws DownloadException {

    if (metacard == null) {
        throw new DownloadException("Cannot download resource if metacard is null");
    } else if (StringUtils.isBlank(metacard.getId())) {
        throw new DownloadException("Metacard must have unique id.");
    } else if (retriever == null) {
        throw new DownloadException("Cannot download resource if retriever is null");
    } else if (resourceRequest == null) {
        throw new DownloadException("Cannot download resource if request is null");
    }

    try {
        resourceResponse = retriever.retrieveResource();
    } catch (ResourceNotFoundException | ResourceNotSupportedException | IOException e) {
        throw new DownloadException("Cannot download resource", e);
    }

    resourceResponse.getProperties().put(Metacard.ID, metacard.getId());
    // Sources do not create ResourceResponses with the original ResourceRequest, hence
    // it is added here because it will be needed for caching
    resourceResponse = new ResourceResponseImpl(resourceRequest, resourceResponse.getProperties(),
            resourceResponse.getResource());

    // TODO - this should be before retrieveResource() but eventPublisher requires a
    // resourceResponse and that resource response must have a resource request in it (to get
    // USER property)
    eventPublisher.postRetrievalStatus(resourceResponse, ProductRetrievalStatus.STARTED, metacard, null, 0L,
            downloadIdentifier);

    AtomicBoolean downloadStarted = new AtomicBoolean(Boolean.FALSE);
    ReliableResourceDownloader downloader = new ReliableResourceDownloader(downloaderConfig, downloadStarted,
            downloadIdentifier, resourceResponse, retriever);
    resourceResponse = downloader.setupDownload(metacard, downloadStatusInfo);

    // Start download in separate thread so can return ResourceResponse with
    // ReliableResourceInputStream available for client to start reading from
    executor.submit(downloader);

    // Wait for download to get started before returning control to client
    Stopwatch stopwatch = Stopwatch.createStarted();
    while (!downloadStarted.get()) {
        try {
            Thread.sleep(10);
        } catch (InterruptedException e) {
        }
        long elapsedTime = stopwatch.elapsed(TimeUnit.MILLISECONDS);
        if (elapsedTime > ONE_SECOND_IN_MS) {
            LOGGER.debug("downloadStarted still FALSE - elapsedTime = {}", elapsedTime);
            break;
        }
    }
    LOGGER.debug("elapsedTime = {}", stopwatch.elapsed(TimeUnit.MILLISECONDS));
    stopwatch.stop();

    return resourceResponse;
}