Example usage for com.google.common.base Stopwatch reset

Introduction

This page collects usage examples for the reset() method of com.google.common.base.Stopwatch.

Prototype

public Stopwatch reset() 

Document

Sets the elapsed time for this stopwatch to zero, and places it in a stopped state.
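
All of the examples below use the same idiom: after one phase has been measured, the stopwatch is zeroed with reset() and immediately restarted with start(), often chained as reset().start(), to time the next phase. The following minimal sketch illustrates that idiom; the class name, sleep durations, and phase labels are illustrative only and do not come from the examples.

import java.util.concurrent.TimeUnit;

import com.google.common.base.Stopwatch;

public class StopwatchResetExample {
    public static void main(String[] args) throws InterruptedException {
        Stopwatch timer = Stopwatch.createStarted();

        Thread.sleep(50); // stand-in for a first phase of work
        System.out.println("phase 1 took " + timer.elapsed(TimeUnit.MILLISECONDS) + " ms");

        // reset() zeroes the elapsed time and leaves the stopwatch stopped,
        // so start() must be called again before timing the next phase.
        timer.reset().start();

        Thread.sleep(75); // stand-in for a second phase of work
        System.out.println("phase 2 took " + timer.elapsed(TimeUnit.MILLISECONDS) + " ms");
    }
}

Because reset() places the stopwatch in a stopped state, calling elapsed(...) immediately after reset() returns zero; forgetting the chained start() is the most common mistake with this pattern.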

Usage

From source file:com.google.devtools.build.android.AndroidDataMerger.java

/**
 * Loads a list of dependency {@link SerializedAndroidData} and merge with the primary {@link
 * ParsedAndroidData}.//from ww  w  . j av a 2 s .  c  o m
 *
 * @see AndroidDataMerger#merge(ParsedAndroidData, ParsedAndroidData, UnvalidatedAndroidData,
 *     boolean) for details.
 */
UnwrittenMergedAndroidData loadAndMerge(List<? extends SerializedAndroidData> transitive,
        List<? extends SerializedAndroidData> direct, ParsedAndroidData primary, Path primaryManifest,
        boolean allowPrimaryOverrideAll) throws MergingException {
    Stopwatch timer = Stopwatch.createStarted();
    try {
        final ParsedAndroidData.Builder directBuilder = ParsedAndroidData.Builder.newBuilder();
        final ParsedAndroidData.Builder transitiveBuilder = ParsedAndroidData.Builder.newBuilder();
        final AndroidDataSerializer serializer = AndroidDataSerializer.create();
        final List<ListenableFuture<Boolean>> tasks = new ArrayList<>();
        for (final SerializedAndroidData dependency : direct) {
            tasks.add(
                    executorService.submit(new ParseDependencyDataTask(serializer, dependency, directBuilder)));
        }
        for (final SerializedAndroidData dependency : transitive) {
            tasks.add(executorService
                    .submit(new ParseDependencyDataTask(serializer, dependency, transitiveBuilder)));
        }
        // Wait for all the parsing to complete.
        FailedFutureAggregator<MergingException> aggregator = FailedFutureAggregator
                .createForMergingExceptionWithMessage("Failure(s) during dependency parsing");
        aggregator.aggregateAndMaybeThrow(tasks);
        logger.fine(String.format("Merged dependencies read in %sms", timer.elapsed(TimeUnit.MILLISECONDS)));
        timer.reset().start();
        return doMerge(transitiveBuilder.build(), directBuilder.build(), primary, primaryManifest,
                allowPrimaryOverrideAll);
    } finally {
        logger.fine(String.format("Resources merged in %sms", timer.elapsed(TimeUnit.MILLISECONDS)));
    }
}

From source file:put.ci.cevo.framework.algorithms.ApacheCMAES.java

/**
 * {@inheritDoc}
 */
@Override
protected PointValuePair doOptimize() {
    // -------------------- Initialization --------------------------------

    isMinimize = getGoalType().equals(GoalType.MINIMIZE);
    final double[] guess = getStartPoint();
    // number of objective variables/problem dimension
    dimension = guess.length;
    initializeCMA(guess);
    iterations = 0;
    double bestValue = (isMinimize ? Double.MAX_VALUE : Double.MIN_VALUE);
    push(fitnessHistory, bestValue);
    PointValuePair optimum = new PointValuePair(getStartPoint(), isMinimize ? bestValue : -bestValue);
    PointValuePair lastResult = null;

    // -------------------- Generation Loop --------------------------------
    EvaluatedPopulation<double[]> evaluatedPopulation = null;

    Stopwatch stopwatch = Stopwatch.createUnstarted();
    generationLoop: for (iterations = 1; iterations <= maxIterations; iterations++) {
        stopwatch.reset();
        stopwatch.start();
        incrementIterationCount();

        // Generate and evaluate lambda offspring
        final RealMatrix arz = randn1(dimension, lambda);
        final RealMatrix arx = zeros(dimension, lambda);
        final double[] fitness = new double[lambda];
        // generate random offspring
        for (int k = 0; k < lambda; k++) {
            RealMatrix arxk = null;
            for (int i = 0; i < checkFeasableCount + 1; i++) {
                if (diagonalOnly <= 0) {
                    arxk = xmean.add(BD.multiply(arz.getColumnMatrix(k)).scalarMultiply(sigma)); // m + sig * Normal(0,C)
                } else {
                    arxk = xmean.add(times(diagD, arz.getColumnMatrix(k)).scalarMultiply(sigma));
                }
                //if (i >= checkFeasableCount ||
                //      fitfun.isFeasible(arxk.getColumn(0))) {
                //   break;
                //}
                // regenerate random arguments for row
                arz.setColumn(k, randn(dimension));
            }
            copyColumn(arxk, 0, arx, k);
            //try {
            //   valuePenaltyPairs[k] = fitfun.value(arx.getColumn(k)); // compute fitness
            //} catch (TooManyEvaluationsException e) {
            //   break generationLoop;
            //}
        }

        double newPopTime = stopwatch.elapsed(TimeUnit.MILLISECONDS) / 1000.0;
        stopwatch.reset();
        stopwatch.start();
        ArrayList<double[]> population = new ArrayList<>(lambda);
        // This is mine. I ignore constraints.
        for (int k = 0; k < lambda; ++k) {
            population.add(arx.getColumn(k));
        }

        evaluatedPopulation = populationEvaluator.evaluate(population, iterations - 1, random);
        final ValuePenaltyPair[] valuePenaltyPairs = new ValuePenaltyPair[lambda];
        for (int k = 0; k < lambda; ++k) {
            valuePenaltyPairs[k] = new ValuePenaltyPair(evaluatedPopulation.getPopulation().get(k).getFitness(),
                    0.0);
        }

        // Compute fitnesses by adding value and penalty after scaling by value range.
        double valueRange = valueRange(valuePenaltyPairs);
        for (int iValue = 0; iValue < valuePenaltyPairs.length; iValue++) {
            fitness[iValue] = valuePenaltyPairs[iValue].value + valuePenaltyPairs[iValue].penalty * valueRange;
            if (!isMinimize)
                fitness[iValue] = -fitness[iValue];
        }
        double evalTime = stopwatch.elapsed(TimeUnit.MILLISECONDS) / 1000.0;
        stopwatch.reset();
        stopwatch.start();

        // Sort by fitness and compute weighted mean into xmean
        final int[] arindex = sortedIndices(fitness);
        // Calculate new xmean, this is selection and recombination
        final RealMatrix xold = xmean; // for speed up of Eq. (2) and (3)
        final RealMatrix bestArx = selectColumns(arx, MathArrays.copyOf(arindex, mu));
        xmean = bestArx.multiply(weights);
        final RealMatrix bestArz = selectColumns(arz, MathArrays.copyOf(arindex, mu));
        final RealMatrix zmean = bestArz.multiply(weights);
        final boolean hsig = updateEvolutionPaths(zmean, xold);
        if (diagonalOnly <= 0) {
            updateCovariance(hsig, bestArx, arz, arindex, xold);
        } else {
            updateCovarianceDiagonalOnly(hsig, bestArz);
        }
        // Adapt step size sigma - Eq. (5)
        sigma *= FastMath.exp(FastMath.min(1, (normps / chiN - 1) * cs / damps));
        final double bestFitness = fitness[arindex[0]];
        final double worstFitness = fitness[arindex[arindex.length - 1]];
        if (bestValue > bestFitness) {
            bestValue = bestFitness;
            lastResult = optimum;
            optimum = new PointValuePair(bestArx.getColumn(0), isMinimize ? bestFitness : -bestFitness);
            if (getConvergenceChecker() != null && lastResult != null
                    && getConvergenceChecker().converged(iterations, optimum, lastResult)) {
                break generationLoop;
            }
        }
        // handle termination criteria
        // Break, if fitness is good enough
        if (stopFitness != 0 && bestFitness < (isMinimize ? stopFitness : -stopFitness)) {
            break generationLoop;
        }
        final double[] sqrtDiagC = sqrt(diagC).getColumn(0);
        final double[] pcCol = pc.getColumn(0);
        for (int i = 0; i < dimension; i++) {
            if (sigma * FastMath.max(FastMath.abs(pcCol[i]), sqrtDiagC[i]) > stopTolX) {
                break;
            }
            if (i >= dimension - 1) {
                break generationLoop;
            }
        }
        for (int i = 0; i < dimension; i++) {
            if (sigma * sqrtDiagC[i] > stopTolUpX) {
                break generationLoop;
            }
        }
        final double historyBest = min(fitnessHistory);
        final double historyWorst = max(fitnessHistory);
        if (iterations > 2 && FastMath.max(historyWorst, worstFitness)
                - FastMath.min(historyBest, bestFitness) < stopTolFun) {
            break generationLoop;
        }
        if (iterations > fitnessHistory.length && historyWorst - historyBest < stopTolHistFun) {
            break generationLoop;
        }
        // condition number of the covariance matrix exceeds 1e14
        if (max(diagD) / min(diagD) > 1e7) {
            break generationLoop;
        }
        // user defined termination
        if (getConvergenceChecker() != null) {
            final PointValuePair current = new PointValuePair(bestArx.getColumn(0),
                    isMinimize ? bestFitness : -bestFitness);
            if (lastResult != null && getConvergenceChecker().converged(iterations, current, lastResult)) {
                break generationLoop;
            }
            lastResult = current;
        }
        // Adjust step size in case of equal function values (flat fitness)
        if (bestValue == fitness[arindex[(int) (0.1 + lambda / 4.)]]) {
            sigma *= FastMath.exp(0.2 + cs / damps);
        }
        if (iterations > 2
                && FastMath.max(historyWorst, bestFitness) - FastMath.min(historyBest, bestFitness) == 0) {
            sigma *= FastMath.exp(0.2 + cs / damps);
        }
        // store best in history
        push(fitnessHistory, bestFitness);
        if (generateStatistics) {
            statisticsSigmaHistory.add(sigma);
            statisticsFitnessHistory.add(bestFitness);
            statisticsMeanHistory.add(xmean.transpose());
            statisticsDHistory.add(diagD.transpose().scalarMultiply(1E5));
        }

        double cmaesTime = stopwatch.elapsed(TimeUnit.MILLISECONDS) / 1000.0;
        stopwatch.reset();
        stopwatch.start();
        listener.onNextIteraction(evaluatedPopulation);
        double listenerTime = stopwatch.elapsed(TimeUnit.MILLISECONDS) / 1000.0;
        logger.info(String.format("NewPop: %.2f, Eval: %.2f, CMAES: %.2f, Listener: %.2f", newPopTime,
                evalTime, cmaesTime, listenerTime));
    }
    listener.onLastIteraction(evaluatedPopulation);

    return optimum;
}

From source file:org.apache.rocketmq.console.task.DashboardCollectTask.java

@Scheduled(cron = "30 0/1 * * * ?")
@MultiMQAdminCmdMethod(timeoutMillis = 5000)
public void collectTopic() {
    if (!rmqConfigure.isEnableDashBoardCollect()) {
        return;
    }
    Date date = new Date();
    Stopwatch stopwatch = Stopwatch.createStarted();
    try {
        TopicList topicList = mqAdminExt.fetchAllTopicList();
        Set<String> topicSet = topicList.getTopicList();
        for (String topic : topicSet) {
            if (topic.startsWith(MixAll.RETRY_GROUP_TOPIC_PREFIX)
                    || topic.startsWith(MixAll.DLQ_GROUP_TOPIC_PREFIX)) {
                continue;
            }

            TopicRouteData topicRouteData = mqAdminExt.examineTopicRouteInfo(topic);

            GroupList groupList = mqAdminExt.queryTopicConsumeByWho(topic);

            double inTPS = 0;

            long inMsgCntToday = 0;

            double outTPS = 0;

            long outMsgCntToday = 0;

            for (BrokerData bd : topicRouteData.getBrokerDatas()) {
                String masterAddr = bd.getBrokerAddrs().get(MixAll.MASTER_ID);
                if (masterAddr != null) {
                    try {
                        stopwatch.start();
                        log.info("start time: {}", stopwatch.toString());
                        BrokerStatsData bsd = mqAdminExt.viewBrokerStatsData(masterAddr,
                                BrokerStatsManager.TOPIC_PUT_NUMS, topic);
                        stopwatch.stop();
                        log.info("stop time : {}", stopwatch.toString());
                        stopwatch.reset();
                        inTPS += bsd.getStatsMinute().getTps();
                        inMsgCntToday += StatsAllSubCommand.compute24HourSum(bsd);
                    } catch (Exception e) {
                        //                            throw Throwables.propagate(e);
                    }
                }
            }

            if (groupList != null && !groupList.getGroupList().isEmpty()) {

                for (String group : groupList.getGroupList()) {
                    for (BrokerData bd : topicRouteData.getBrokerDatas()) {
                        String masterAddr = bd.getBrokerAddrs().get(MixAll.MASTER_ID);
                        if (masterAddr != null) {
                            try {
                                String statsKey = String.format("%s@%s", topic, group);
                                BrokerStatsData bsd = mqAdminExt.viewBrokerStatsData(masterAddr,
                                        BrokerStatsManager.GROUP_GET_NUMS, statsKey);
                                outTPS += bsd.getStatsMinute().getTps();
                                outMsgCntToday += StatsAllSubCommand.compute24HourSum(bsd);
                            } catch (Exception e) {
                                //                                    throw Throwables.propagate(e);
                            }
                        }
                    }
                }
            }

            List<String> list;
            try {
                list = dashboardCollectService.getTopicMap().get(topic);
            } catch (ExecutionException e) {
                throw Throwables.propagate(e);
            }
            if (null == list) {
                list = Lists.newArrayList();
            }

            list.add(date.getTime() + "," + new BigDecimal(inTPS).setScale(5, BigDecimal.ROUND_HALF_UP) + ","
                    + inMsgCntToday + "," + new BigDecimal(outTPS).setScale(5, BigDecimal.ROUND_HALF_UP) + ","
                    + outMsgCntToday);
            dashboardCollectService.getTopicMap().put(topic, list);

        }

        log.debug("Topic Collected Data in memory = {}"
                + JsonUtil.obj2String(dashboardCollectService.getTopicMap().asMap()));
    } catch (Exception err) {
        throw Throwables.propagate(err);
    }
}

From source file:org.locationtech.geogig.repository.WorkingTree.java

public void insert(final String treePath, @SuppressWarnings("rawtypes") final FeatureSource source,
        final Query query, ProgressListener listener) {

    final NodeRef treeRef = findOrCreateTypeTree(treePath, source);

    Long collectionSize = null;
    try {
        // try for a fast count
        int count = source.getCount(Query.ALL);
        if (count > -1) {
            collectionSize = Long.valueOf(count);
        }
    } catch (IOException e) {
        throw Throwables.propagate(e);
    }

    final int nFetchThreads;
    {
        // maxFeatures is assumed to be supported by all data sources, so supportsPaging depends
        // only on offset being supported
        boolean supportsPaging = source.getQueryCapabilities().isOffsetSupported();
        if (supportsPaging) {
            Platform platform = context.platform();
            int availableProcessors = platform.availableProcessors();
            nFetchThreads = Math.max(2, availableProcessors / 2);
        } else {
            nFetchThreads = 1;
        }
    }

    final ExecutorService executorService = Executors.newFixedThreadPool(2 + nFetchThreads,
            new ThreadFactoryBuilder().setNameFormat("WorkingTree-tree-builder-%d").build());

    listener.started();

    Stopwatch sw = Stopwatch.createStarted();

    final RevTree origTree = indexDatabase.getTree(treeRef.objectId());
    Platform platform = context.platform();
    RevTreeBuilder2 builder = new RevTreeBuilder2(indexDatabase, origTree, treeRef.getMetadataId(), platform,
            executorService);

    List<Future<Integer>> insertBlobsFuture = insertBlobs(source, query, executorService, listener,
            collectionSize, nFetchThreads, builder);

    RevTree newFeatureTree;
    try {
        long insertedCount = 0;
        for (Future<Integer> f : insertBlobsFuture) {
            insertedCount += f.get().longValue();
        }
        sw.stop();
        listener.setDescription(insertedCount + " distinct features inserted in " + sw);

        listener.setDescription("Building final tree...");

        sw.reset().start();
        newFeatureTree = builder.build();

        listener.setDescription(
                String.format("%d features tree built in %s", newFeatureTree.size(), sw.stop()));
        listener.complete();

    } catch (Exception e) {
        throw Throwables.propagate(Throwables.getRootCause(e));
    } finally {
        executorService.shutdown();
    }
    ObjectId newTree = context.command(WriteBack.class).setAncestor(getTreeSupplier()).setChildPath(treePath)
            .setMetadataId(treeRef.getMetadataId()).setTree(newFeatureTree).call();

    updateWorkHead(newTree);

}

From source file:com.palantir.atlasdb.transaction.impl.SnapshotTransaction.java

private void commitWrites(TransactionService transactionService) {
    if (!hasWrites()) {
        return;
    }
    Stopwatch watch = Stopwatch.createStarted();
    LockRefreshToken commitLocksToken = acquireLocksForCommit();
    long millisForLocks = watch.elapsed(TimeUnit.MILLISECONDS);
    try {
        watch.reset().start();
        throwIfConflictOnCommit(commitLocksToken, transactionService);
        long millisCheckingForConflicts = watch.elapsed(TimeUnit.MILLISECONDS);

        watch.reset().start();
        keyValueService.multiPut(writesByTable, getStartTimestamp());
        long millisForWrites = watch.elapsed(TimeUnit.MILLISECONDS);

        // Now that all writes are done, get the commit timestamp
        // We must do this before we check that our locks are still valid to ensure that
        // other transactions that will hold these locks are sure to have start
        // timestamps after our commit timestamp.
        long commitTimestamp = timestampService.getFreshTimestamp();
        commitTsForScrubbing = commitTimestamp;

        // punch on commit so that if hard delete is the only thing happening on a system,
        // we won't block forever waiting for the unreadable timestamp to advance past the
        // scrub timestamp (same as the hard delete transaction's start timestamp)
        watch.reset().start();
        cleaner.punch(commitTimestamp);
        long millisForPunch = watch.elapsed(TimeUnit.MILLISECONDS);

        throwIfReadWriteConflictForSerializable(commitTimestamp);

        // Verify that our locks are still valid before we actually commit;
        // this check is required by the transaction protocol for correctness
        throwIfExternalAndCommitLocksNotValid(commitLocksToken);

        watch.reset().start();
        putCommitTimestamp(commitTimestamp, commitLocksToken, transactionService);
        long millisForCommitTs = watch.elapsed(TimeUnit.MILLISECONDS);

        Set<LockRefreshToken> expiredLocks = refreshExternalAndCommitLocks(commitLocksToken);
        if (!expiredLocks.isEmpty()) {
            String errorMessage = "This isn't a bug but it should happen very infrequently.  Required locks are no longer"
                    + " valid but we have already committed successfully.  "
                    + getExpiredLocksErrorString(commitLocksToken, expiredLocks);
            log.error(errorMessage, new TransactionFailedRetriableException(errorMessage));
        }
        long millisSinceCreation = System.currentTimeMillis() - timeCreated;
        if (perfLogger.isDebugEnabled()) {
            perfLogger.debug(
                    "Committed {} bytes with locks, start ts {}, commit ts {}, "
                            + "acquiring locks took {} ms, checking for conflicts took {} ms, "
                            + "writing took {} ms, punch took {} ms, putCommitTs took {} ms, "
                            + "total time since tx creation {} ms, tables: {}.",
                    byteCount.get(), getStartTimestamp(), commitTimestamp, millisForLocks,
                    millisCheckingForConflicts, millisForWrites, millisForPunch, millisForCommitTs,
                    millisSinceCreation, writesByTable.keySet());
        }
    } finally {
        lockService.unlock(commitLocksToken);
    }
}

From source file:com.google.devtools.build.android.AndroidResourceProcessor.java

/**
 * Merges all secondary resources with the primary resources.
 */
private MergedAndroidData mergeData(final ParsedAndroidData primary, final Path primaryManifest,
        final List<? extends SerializedAndroidData> direct,
        final List<? extends SerializedAndroidData> transitive, final Path resourcesOut, final Path assetsOut,
        @Nullable final PngCruncher cruncher, final VariantType type, @Nullable final Path symbolsOut,
        @Nullable AndroidResourceClassWriter rclassWriter) throws MergingException {
    Stopwatch timer = Stopwatch.createStarted();
    final ListeningExecutorService executorService = MoreExecutors
            .listeningDecorator(Executors.newFixedThreadPool(15));
    try (Closeable closeable = ExecutorServiceCloser.createWith(executorService)) {
        AndroidDataMerger merger = AndroidDataMerger.createWithPathDeduplictor(executorService);
        UnwrittenMergedAndroidData merged = merger.loadAndMerge(transitive, direct, primary, primaryManifest,
                type != VariantType.LIBRARY);
        logger.fine(String.format("merge finished in %sms", timer.elapsed(TimeUnit.MILLISECONDS)));
        timer.reset().start();
        if (symbolsOut != null) {
            AndroidDataSerializer serializer = AndroidDataSerializer.create();
            merged.serializeTo(serializer);
            serializer.flushTo(symbolsOut);
            logger.fine(
                    String.format("serialize merge finished in %sms", timer.elapsed(TimeUnit.MILLISECONDS)));
            timer.reset().start();
        }
        if (rclassWriter != null) {
            merged.writeResourceClass(rclassWriter);
            logger.fine(String.format("write classes finished in %sms", timer.elapsed(TimeUnit.MILLISECONDS)));
            timer.reset().start();
        }
        AndroidDataWriter writer = AndroidDataWriter.createWith(resourcesOut.getParent(), resourcesOut,
                assetsOut, cruncher, executorService);
        return merged.write(writer);
    } catch (IOException e) {
        throw MergingException.wrapException(e).build();
    } finally {
        logger.fine(String.format("write merge finished in %sms", timer.elapsed(TimeUnit.MILLISECONDS)));
    }
}

From source file:org.apache.bookkeeper.bookie.Journal.java

/**
 * A thread used for persisting journal entries to journal files.
 *
 * <p>
 * Besides persisting journal entries, it also takes responsibility of
 * rolling journal files when a journal file reaches journal file size
 * limitation.
 * </p>
 * <p>
 * During journal rolling, it first closes the writing journal, generates
 * new journal file using current timestamp, and continue persistence logic.
 * Those journals will be garbage collected in SyncThread.
 * </p>
 * @see org.apache.bookkeeper.bookie.SyncThread
 */
@Override
public void run() {
    LinkedList<QueueEntry> toFlush = new LinkedList<QueueEntry>();
    ByteBuffer lenBuff = ByteBuffer.allocate(4);
    ByteBuffer paddingBuff = ByteBuffer.allocate(2 * conf.getJournalAlignmentSize());
    ZeroBuffer.put(paddingBuff);
    JournalChannel logFile = null;
    forceWriteThread.start();
    Stopwatch journalCreationWatcher = new Stopwatch();
    Stopwatch journalFlushWatcher = new Stopwatch();
    long batchSize = 0;
    try {
        List<Long> journalIds = listJournalIds(journalDirectory, null);
        // Should not use MathUtils.now(), which use System.nanoTime() and
        // could only be used to measure elapsed time.
        // http://docs.oracle.com/javase/1.5.0/docs/api/java/lang/System.html#nanoTime%28%29
        long logId = journalIds.isEmpty() ? System.currentTimeMillis() : journalIds.get(journalIds.size() - 1);
        BufferedChannel bc = null;
        long lastFlushPosition = 0;
        boolean groupWhenTimeout = false;

        long dequeueStartTime = 0L;

        QueueEntry qe = null;
        while (true) {
            // new journal file to write
            if (null == logFile) {
                logId = logId + 1;

                journalCreationWatcher.reset().start();
                logFile = new JournalChannel(journalDirectory, logId, journalPreAllocSize,
                        journalWriteBufferSize, conf.getJournalAlignmentSize(), removePagesFromCache,
                        conf.getJournalFormatVersionToWrite());
                journalCreationStats.registerSuccessfulEvent(
                        journalCreationWatcher.stop().elapsedTime(TimeUnit.NANOSECONDS), TimeUnit.NANOSECONDS);

                bc = logFile.getBufferedChannel();

                lastFlushPosition = bc.position();
            }

            if (qe == null) {
                if (dequeueStartTime != 0) {
                    journalProcessTimeStats.registerSuccessfulEvent(MathUtils.elapsedNanos(dequeueStartTime),
                            TimeUnit.NANOSECONDS);
                }

                if (toFlush.isEmpty()) {
                    qe = queue.take();
                    dequeueStartTime = MathUtils.nowInNano();
                    journalQueueStats.registerSuccessfulEvent(MathUtils.elapsedNanos(qe.enqueueTime),
                            TimeUnit.NANOSECONDS);
                } else {
                    long pollWaitTimeNanos = maxGroupWaitInNanos
                            - MathUtils.elapsedNanos(toFlush.get(0).enqueueTime);
                    if (flushWhenQueueEmpty || pollWaitTimeNanos < 0) {
                        pollWaitTimeNanos = 0;
                    }
                    qe = queue.poll(pollWaitTimeNanos, TimeUnit.NANOSECONDS);
                    dequeueStartTime = MathUtils.nowInNano();

                    if (qe != null) {
                        journalQueueStats.registerSuccessfulEvent(MathUtils.elapsedNanos(qe.enqueueTime),
                                TimeUnit.NANOSECONDS);
                    }

                    boolean shouldFlush = false;
                    // We should issue a forceWrite if any of the three conditions below holds good
                    // 1. If the oldest pending entry has been pending for longer than the max wait time
                    if (maxGroupWaitInNanos > 0 && !groupWhenTimeout
                            && (MathUtils.elapsedNanos(toFlush.get(0).enqueueTime) > maxGroupWaitInNanos)) {
                        groupWhenTimeout = true;
                    } else if (maxGroupWaitInNanos > 0 && groupWhenTimeout && qe != null
                            && MathUtils.elapsedNanos(qe.enqueueTime) < maxGroupWaitInNanos) {
                        // When the group wait times out, it is better to look ahead, as there might be
                        // many entries that have already timed out due to a previous slow write
                        // (writing to the filesystem, which is impacted by force writes).
                        // Group those entries in the queue
                        // a) already timeout
                        // b) limit the number of entries to group
                        groupWhenTimeout = false;
                        shouldFlush = true;
                        flushMaxWaitCounter.inc();
                    } else if (qe != null
                            && ((bufferedEntriesThreshold > 0 && toFlush.size() > bufferedEntriesThreshold)
                                    || (bc.position() > lastFlushPosition + bufferedWritesThreshold))) {
                        // 2. If we have buffered more than the buffWriteThreshold or bufferedEntriesThreshold
                        shouldFlush = true;
                        flushMaxOutstandingBytesCounter.inc();
                    } else if (qe == null) {
                        // We should get here only if flushWhenQueueEmpty is true, else we would wait
                        // for a timeout that would put us past the maxWait threshold
                        // 3. If the queue is empty i.e. no benefit of grouping. This happens when we have one
                        // publish at a time - common case in tests.
                        shouldFlush = true;
                        flushEmptyQueueCounter.inc();
                    }

                    // toFlush is non null and not empty so should be safe to access getFirst
                    if (shouldFlush) {
                        if (conf.getJournalFormatVersionToWrite() >= JournalChannel.V5) {
                            writePaddingBytes(logFile, paddingBuff, conf.getJournalAlignmentSize());
                        }
                        journalFlushWatcher.reset().start();
                        bc.flush(false);
                        lastFlushPosition = bc.position();
                        journalFlushStats.registerSuccessfulEvent(
                                journalFlushWatcher.stop().elapsedTime(TimeUnit.NANOSECONDS),
                                TimeUnit.NANOSECONDS);

                        // Trace the lifetime of entries through persistence
                        if (LOG.isDebugEnabled()) {
                            for (QueueEntry e : toFlush) {
                                LOG.debug("Written and queuing for flush Ledger:" + e.ledgerId + " Entry:"
                                        + e.entryId);
                            }
                        }

                        forceWriteBatchEntriesStats.registerSuccessfulValue(toFlush.size());
                        forceWriteBatchBytesStats.registerSuccessfulValue(batchSize);

                        forceWriteRequests.put(new ForceWriteRequest(logFile, logId, lastFlushPosition, toFlush,
                                (lastFlushPosition > maxJournalSize), false));
                        toFlush = new LinkedList<QueueEntry>();
                        batchSize = 0L;
                        // check whether journal file is over file limit
                        if (bc.position() > maxJournalSize) {
                            logFile = null;
                            continue;
                        }
                    }
                }
            }

            if (!running) {
                LOG.info("Journal Manager is asked to shut down, quit.");
                break;
            }

            if (qe == null) { // no more queue entry
                continue;
            }

            journalWriteBytes.add(qe.entry.remaining());
            journalQueueSize.dec();

            batchSize += (4 + qe.entry.remaining());

            lenBuff.clear();
            lenBuff.putInt(qe.entry.remaining());
            lenBuff.flip();

            // preAlloc based on size
            logFile.preAllocIfNeeded(4 + qe.entry.remaining());

            //
            // we should be doing the following, but then we run out of
            // direct byte buffers
            // logFile.write(new ByteBuffer[] { lenBuff, qe.entry });
            bc.write(lenBuff);
            bc.write(qe.entry);

            toFlush.add(qe);
            qe = null;
        }
        logFile.close();
        logFile = null;
    } catch (IOException ioe) {
        LOG.error("I/O exception in Journal thread!", ioe);
    } catch (InterruptedException ie) {
        LOG.warn("Journal exits when shutting down", ie);
    } finally {
        // There could be packets queued for forceWrite on this logFile
        // That is fine as this exception is going to anyway take down the
        // bookie. If we execute this as a part of graceful shutdown,
        // close will flush the file system cache making any previous
        // cached writes durable so this is fine as well.
        IOUtils.close(LOG, logFile);
    }
    LOG.info("Journal exited loop!");
}

From source file:com.thinkbiganalytics.feedmgr.nifi.CreateFeedBuilder.java

private ProcessGroupDTO createProcessGroupForFeed() throws FeedCreationException {
    Stopwatch stopwatch = Stopwatch.createStarted();
    //create Category Process group
    this.categoryGroup = niFiObjectCache.getCategoryProcessGroup(category);
    if (categoryGroup == null) {
        try {
            ProcessGroupDTO group = restClient.createProcessGroup(category);
            this.categoryGroup = group;
            this.newCategory = true;
            if (this.categoryGroup != null) {
                niFiObjectCache.addCategoryProcessGroup(this.categoryGroup);
            }
        } catch (Exception e) {
            //Swallow exception... it will be handled later
        }
    }
    if (this.categoryGroup == null) {
        throw new FeedCreationException("Unable to get or create the Process group for the Category " + category
                + ". Error occurred while creating instance of template " + templateId + " for Feed "
                + feedName);
    }
    stopwatch.stop();
    log.debug("Time to get/create Category Process Group:{} was: {} ms", category,
            stopwatch.elapsed(TimeUnit.MILLISECONDS));
    stopwatch.reset();

    stopwatch.start();
    //1 create the processGroup
    //check to see if the feed exists... if so version off the old group and create a new group with this feed
    ProcessGroupDTO feedGroup = restClient.getProcessGroupByName(this.categoryGroup.getId(), feedName);
    stopwatch.stop();
    log.debug("Time to find feed Process Group: {} was: {} ms", feedName,
            stopwatch.elapsed(TimeUnit.MILLISECONDS));
    stopwatch.reset();
    if (feedGroup != null) {
        try {
            previousFeedProcessGroup = feedGroup;
            templateCreationHelper.versionProcessGroup(feedGroup);
        } catch (Exception e) {
            throw new FeedCreationException("Previous version of the feed " + feedName
                    + " was found.  Error in attempting to version the previous feed.  Please go into Nifi and address any issues with the Feeds Process Group",
                    e);
        }
    }

    ProcessGroupDTO group = restClient.createProcessGroup(this.categoryGroup.getId(), feedName);

    return group;
}

From source file:com.thinkbiganalytics.feedmgr.nifi.CreateFeedBuilder.java

private void connectFeedToReusableTemplate(ProcessGroupDTO feedProcessGroup,
        ProcessGroupDTO categoryProcessGroup) throws NifiComponentNotFoundException {

    Stopwatch stopwatch = Stopwatch.createStarted();
    String categoryProcessGroupId = categoryProcessGroup.getId();
    String categoryParentGroupId = categoryProcessGroup.getParentGroupId();
    String categoryProcessGroupName = categoryProcessGroup.getName();
    String feedProcessGroupId = feedProcessGroup.getId();
    String feedProcessGroupName = feedProcessGroup.getName();

    ProcessGroupDTO reusableTemplateCategory = niFiObjectCache.getReusableTemplateCategoryProcessGroup();

    if (reusableTemplateCategory == null) {
        throw new NifiClientRuntimeException(
                "Unable to find the Reusable Template Group. Please ensure NiFi has the 'reusable_templates' processgroup and appropriate reusable flow for this feed."
                        + " You may need to import the base reusable template for this feed.");
    }
    String reusableTemplateCategoryGroupId = reusableTemplateCategory.getId();
    stopwatch.stop();
    log.debug("Time to get reusableTemplateCategory: {} ", stopwatch.elapsed(TimeUnit.MILLISECONDS));
    stopwatch.reset();

    Stopwatch totalStopWatch = Stopwatch.createUnstarted();
    for (InputOutputPort port : inputOutputPorts) {
        totalStopWatch.start();
        stopwatch.start();
        PortDTO reusableTemplatePort = niFiObjectCache.getReusableTemplateInputPort(port.getInputPortName());
        stopwatch.stop();
        log.debug("Time to get reusableTemplate inputPort {} : {} ", port.getInputPortName(),
                stopwatch.elapsed(TimeUnit.MILLISECONDS));
        stopwatch.reset();
        if (reusableTemplatePort != null) {

            String categoryOutputPortName = categoryProcessGroupName + " to " + port.getInputPortName();
            stopwatch.start();
            PortDTO categoryOutputPort = niFiObjectCache.getCategoryOutputPort(categoryProcessGroupId,
                    categoryOutputPortName);
            stopwatch.stop();
            log.debug("Time to get categoryOutputPort {} : {} ", categoryOutputPortName,
                    stopwatch.elapsed(TimeUnit.MILLISECONDS));
            stopwatch.reset();
            if (categoryOutputPort == null) {
                stopwatch.start();
                //create it
                PortDTO portDTO = new PortDTO();
                portDTO.setParentGroupId(categoryProcessGroupId);
                portDTO.setName(categoryOutputPortName);
                categoryOutputPort = restClient.getNiFiRestClient().processGroups()
                        .createOutputPort(categoryProcessGroupId, portDTO);
                niFiObjectCache.addCategoryOutputPort(categoryProcessGroupId, categoryOutputPort);
                stopwatch.stop();
                log.debug("Time to create categoryOutputPort {} : {} ", categoryOutputPortName,
                        stopwatch.elapsed(TimeUnit.MILLISECONDS));
                stopwatch.reset();

            }
            stopwatch.start();
            Set<PortDTO> feedOutputPorts = feedProcessGroup.getContents().getOutputPorts();
            String feedOutputPortName = port.getOutputPortName();
            if (feedOutputPorts == null || feedOutputPorts.isEmpty()) {
                feedOutputPorts = restClient.getNiFiRestClient().processGroups()
                        .getOutputPorts(feedProcessGroup.getId());
            }
            PortDTO feedOutputPort = NifiConnectionUtil.findPortMatchingName(feedOutputPorts,
                    feedOutputPortName);
            stopwatch.stop();
            log.debug("Time to create feedOutputPort {} : {} ", feedOutputPortName,
                    stopwatch.elapsed(TimeUnit.MILLISECONDS));
            stopwatch.reset();
            if (feedOutputPort != null) {
                stopwatch.start();
                //make the connection on the category from feed to category
                ConnectionDTO feedOutputToCategoryOutputConnection = niFiObjectCache.getConnection(
                        categoryProcessGroupId, feedOutputPort.getId(), categoryOutputPort.getId());
                stopwatch.stop();
                log.debug("Time to get feedOutputToCategoryOutputConnection: {} ",
                        stopwatch.elapsed(TimeUnit.MILLISECONDS));
                stopwatch.reset();
                if (feedOutputToCategoryOutputConnection == null) {
                    stopwatch.start();
                    //CONNECT FEED OUTPUT PORT TO THE Category output port
                    ConnectableDTO source = new ConnectableDTO();
                    source.setGroupId(feedProcessGroupId);
                    source.setId(feedOutputPort.getId());
                    source.setName(feedProcessGroupName);
                    source.setType(NifiConstants.NIFI_PORT_TYPE.OUTPUT_PORT.name());
                    ConnectableDTO dest = new ConnectableDTO();
                    dest.setGroupId(categoryProcessGroupId);
                    dest.setName(categoryOutputPort.getName());
                    dest.setId(categoryOutputPort.getId());
                    dest.setType(NifiConstants.NIFI_PORT_TYPE.OUTPUT_PORT.name());
                    feedOutputToCategoryOutputConnection = restClient.createConnection(categoryProcessGroupId,
                            source, dest);
                    niFiObjectCache.addConnection(categoryProcessGroupId, feedOutputToCategoryOutputConnection);
                    nifiFlowCache.addConnectionToCache(feedOutputToCategoryOutputConnection);
                    stopwatch.stop();
                    log.debug("Time to create feedOutputToCategoryOutputConnection: {} ",
                            stopwatch.elapsed(TimeUnit.MILLISECONDS));
                    stopwatch.reset();
                }

                stopwatch.start();
                //connection made on parent (root) to reusable template
                ConnectionDTO categoryToReusableTemplateConnection = niFiObjectCache.getConnection(
                        categoryProcessGroup.getParentGroupId(), categoryOutputPort.getId(),
                        reusableTemplatePort.getId());
                stopwatch.stop();
                log.debug("Time to get categoryToReusableTemplateConnection: {} ",
                        stopwatch.elapsed(TimeUnit.MILLISECONDS));
                stopwatch.reset();
                //Now connect the category ProcessGroup to the global template
                if (categoryToReusableTemplateConnection == null) {
                    stopwatch.start();
                    ConnectableDTO categorySource = new ConnectableDTO();
                    categorySource.setGroupId(categoryProcessGroupId);
                    categorySource.setId(categoryOutputPort.getId());
                    categorySource.setName(categoryOutputPortName);
                    categorySource.setType(NifiConstants.NIFI_PORT_TYPE.OUTPUT_PORT.name());
                    ConnectableDTO categoryToGlobalTemplate = new ConnectableDTO();
                    categoryToGlobalTemplate.setGroupId(reusableTemplateCategoryGroupId);
                    categoryToGlobalTemplate.setId(reusableTemplatePort.getId());
                    categoryToGlobalTemplate.setName(reusableTemplatePort.getName());
                    categoryToGlobalTemplate.setType(NifiConstants.NIFI_PORT_TYPE.INPUT_PORT.name());
                    categoryToReusableTemplateConnection = restClient.createConnection(categoryParentGroupId,
                            categorySource, categoryToGlobalTemplate);
                    niFiObjectCache.addConnection(categoryParentGroupId, categoryToReusableTemplateConnection);
                    nifiFlowCache.addConnectionToCache(categoryToReusableTemplateConnection);
                    stopwatch.stop();
                    log.debug("Time to create categoryToReusableTemplateConnection: {} ",
                            stopwatch.elapsed(TimeUnit.MILLISECONDS));
                    stopwatch.reset();
                }
            }

        }
        totalStopWatch.stop();
        log.debug("Time to connect feed to {} port. ElapsedTime: {} ", port.getInputPortName(),
                totalStopWatch.elapsed(TimeUnit.MILLISECONDS));
        totalStopWatch.reset();
    }

}

From source file:org.caleydo.core.data.collection.table.NumericalTable.java

/**
 *
 */
private void performImputation(KNNImputeDescription desc) {

    Stopwatch w = new Stopwatch().start();
    ImmutableList.Builder<Gene> b = ImmutableList.builder();
    final int rows = getNrRows();
    final int cols = columns.size();

    // create data
    if (desc.getDimension().isRecord()) {
        for (int i = 0; i < rows; ++i) {
            float[] data = new float[cols];
            int nans = 0;
            int j = 0;
            for (AColumn<?, ?> column : columns) {
                @SuppressWarnings("unchecked")
                NumericalColumn<?, Float> nColumn = (NumericalColumn<?, Float>) column;
                Float raw = nColumn.getRaw(i);
                if (raw == null || raw.isNaN())
                    nans++;
                data[j++] = raw == null ? Float.NaN : raw.floatValue();
            }
            b.add(new Gene(i, nans, data));
        }
    } else {
        int i = 0;
        for (AColumn<?, ?> column : columns) {
            float[] data = new float[rows];
            int nans = 0;
            @SuppressWarnings("unchecked")
            NumericalColumn<?, Float> nColumn = (NumericalColumn<?, Float>) column;

            for (int j = 0; j < rows; j++) {
                Float raw = nColumn.getRaw(j);
                if (raw == null || raw.isNaN())
                    nans++;
                data[j] = raw == null ? Float.NaN : raw.floatValue();
            }
            }
            b.add(new Gene(i++, nans, data));
        }
    }

    System.out.println("NumericalTable.performImputation() data creation:\t" + w);
    w.reset().start();
    KNNImpute task = new KNNImpute(desc, b.build());
    ForkJoinPool pool = new ForkJoinPool();
    com.google.common.collect.Table<Integer, Integer, Float> impute = pool.invoke(task);
    pool.shutdown();
    System.out.println("NumericalTable.performImputation() computation:\t" + w);
    w.reset().start();

    // update data
    final boolean isColumnFirstDimension = desc.getDimension().isDimension();
    // in either case iterate over the columns first and update a columns at once
    for (Map.Entry<Integer, Map<Integer, Float>> entry : (isColumnFirstDimension ? impute.rowMap()
            : impute.columnMap()).entrySet()) {
        AColumn<?, ?> aColumn = columns.get(entry.getKey().intValue());
        @SuppressWarnings("unchecked")
        NumericalColumn<?, Float> nColumn = (NumericalColumn<?, Float>) aColumn;
        // apply updates
        for (Map.Entry<Integer, Float> entry2 : entry.getValue().entrySet()) {
            nColumn.setRaw(entry2.getKey(), entry2.getValue());
        }
    }
    System.out.println("NumericalTable.performImputation() update:\t" + w);
}