Example usage for com.google.common.base Stopwatch start

List of usage examples for com.google.common.base Stopwatch start

Introduction

On this page you can find usage examples for com.google.common.base Stopwatch start().

Prototype

public Stopwatch start() 

Document

Starts the stopwatch. Returns this Stopwatch instance; throws IllegalStateException if the stopwatch is already running.
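
Before the project examples below, here is a minimal, self-contained sketch of the typical start/stop/elapsed lifecycle. It is not taken from any of the source files on this page, and the class name StopwatchStartExample is made up for illustration. Note that start() returns the Stopwatch itself, so calls can be chained, and that starting an already-running stopwatch throws IllegalStateException.

import com.google.common.base.Stopwatch;

import java.util.concurrent.TimeUnit;

// Hypothetical standalone example; not from the source files listed under "Usage".
public class StopwatchStartExample {
    public static void main(String[] args) throws InterruptedException {
        // Create an unstarted stopwatch and start it explicitly.
        Stopwatch stopwatch = Stopwatch.createUnstarted();
        stopwatch.start();

        Thread.sleep(50); // simulate some work

        stopwatch.stop();
        System.out.println("First run took " + stopwatch.elapsed(TimeUnit.MILLISECONDS) + " ms");

        // reset() clears the elapsed time and returns the stopwatch, so it can be
        // chained with start() to time another section, as several examples below do.
        stopwatch.reset().start();
        Thread.sleep(10);
        System.out.println("Second run took " + stopwatch.elapsed(TimeUnit.MILLISECONDS) + " ms");
    }
}

As the examples below show, projects typically pair this pattern with Stopwatch.createStarted() when timing should begin immediately, and with Stopwatch.createUnstarted() when start() is called later, for instance on the first iteration of a retry loop.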

Usage

From source file:org.opendaylight.controller.config.persist.impl.ConfigPusherImpl.java

private synchronized boolean pushConfigWithConflictingVersionRetries(ConfigSnapshotHolder configSnapshotHolder)
        throws ConfigSnapshotFailureException {
    ConflictingVersionException lastException;
    Stopwatch stopwatch = Stopwatch.createUnstarted();
    do {
        //TODO wait until all expected modules are in yangStoreService; do we even need to wait with yangStoreService instead of netconfOperationService?
        String idForReporting = configSnapshotHolder.toString();
        SortedSet<String> expectedCapabilities = checkNotNull(configSnapshotHolder.getCapabilities(),
                "Expected capabilities must not be null - %s, check %s", idForReporting,
                configSnapshotHolder.getClass().getName());

        // wait max time for required capabilities to appear
        waitForCapabilities(expectedCapabilities, idForReporting);
        try {
            if (!stopwatch.isRunning()) {
                stopwatch.start();
            }
            return pushConfig(configSnapshotHolder);
        } catch (ConflictingVersionException e) {
            lastException = e;
            LOG.info("Conflicting version detected, will retry after timeout");
            sleep();
        }
    } while (stopwatch.elapsed(TimeUnit.MILLISECONDS) < conflictingVersionTimeoutMillis);
    throw new IllegalStateException("Max wait for conflicting version stabilization timeout after "
            + stopwatch.elapsed(TimeUnit.MILLISECONDS) + " ms", lastException);
}

From source file:org.apache.drill.exec.store.parquet.metadata.Metadata.java

/**
 * Get the parquet metadata for the parquet files in a directory.
 *
 * @param path the path of the directory
 * @return metadata object for an entire parquet directory structure
 * @throws IOException in case of problems during accessing files
 */
private ParquetTableMetadata_v3 getParquetTableMetadata(String path, FileSystem fs) throws IOException {
    Path p = new Path(path);
    FileStatus fileStatus = fs.getFileStatus(p);
    Stopwatch watch = logger.isDebugEnabled() ? Stopwatch.createStarted() : null;
    List<FileStatus> fileStatuses = new ArrayList<>();
    if (fileStatus.isFile()) {
        fileStatuses.add(fileStatus);
    } else {
        fileStatuses.addAll(DrillFileSystemUtil.listFiles(fs, p, true));
    }
    if (watch != null) {
        logger.debug("Took {} ms to get file statuses", watch.elapsed(TimeUnit.MILLISECONDS));
        watch.reset();
        watch.start();
    }

    Map<FileStatus, FileSystem> fileStatusMap = fileStatuses.stream().collect(java.util.stream.Collectors
            .toMap(Function.identity(), s -> fs, (oldFs, newFs) -> newFs, LinkedHashMap::new));

    ParquetTableMetadata_v3 metadata_v3 = getParquetTableMetadata(fileStatusMap);
    if (watch != null) {
        logger.debug("Took {} ms to read file metadata", watch.elapsed(TimeUnit.MILLISECONDS));
        watch.stop();
    }
    return metadata_v3;
}

From source file:com.thinkbiganalytics.feedmgr.nifi.CreateFeedBuilder.java

private void connectFeedToReusableTemplate(ProcessGroupDTO feedProcessGroup,
        ProcessGroupDTO categoryProcessGroup) throws NifiComponentNotFoundException {

    Stopwatch stopwatch = Stopwatch.createStarted();
    String categoryProcessGroupId = categoryProcessGroup.getId();
    String categoryParentGroupId = categoryProcessGroup.getParentGroupId();
    String categoryProcessGroupName = categoryProcessGroup.getName();
    String feedProcessGroupId = feedProcessGroup.getId();
    String feedProcessGroupName = feedProcessGroup.getName();

    ProcessGroupDTO reusableTemplateCategory = niFiObjectCache.getReusableTemplateCategoryProcessGroup();

    if (reusableTemplateCategory == null) {
        throw new NifiClientRuntimeException(
                "Unable to find the Reusable Template Group. Please ensure NiFi has the 'reusable_templates' processgroup and appropriate reusable flow for this feed."
                        + " You may need to import the base reusable template for this feed.");
    }
    String reusableTemplateCategoryGroupId = reusableTemplateCategory.getId();
    stopwatch.stop();
    log.debug("Time to get reusableTemplateCategory: {} ", stopwatch.elapsed(TimeUnit.MILLISECONDS));
    stopwatch.reset();

    Stopwatch totalStopWatch = Stopwatch.createUnstarted();
    for (InputOutputPort port : inputOutputPorts) {
        totalStopWatch.start();
        stopwatch.start();
        PortDTO reusableTemplatePort = niFiObjectCache.getReusableTemplateInputPort(port.getInputPortName());
        stopwatch.stop();
        log.debug("Time to get reusableTemplate inputPort {} : {} ", port.getInputPortName(),
                stopwatch.elapsed(TimeUnit.MILLISECONDS));
        stopwatch.reset();
        if (reusableTemplatePort != null) {

            String categoryOutputPortName = categoryProcessGroupName + " to " + port.getInputPortName();
            stopwatch.start();
            PortDTO categoryOutputPort = niFiObjectCache.getCategoryOutputPort(categoryProcessGroupId,
                    categoryOutputPortName);
            stopwatch.stop();
            log.debug("Time to get categoryOutputPort {} : {} ", categoryOutputPortName,
                    stopwatch.elapsed(TimeUnit.MILLISECONDS));
            stopwatch.reset();
            if (categoryOutputPort == null) {
                stopwatch.start();
                //create it
                PortDTO portDTO = new PortDTO();
                portDTO.setParentGroupId(categoryProcessGroupId);
                portDTO.setName(categoryOutputPortName);
                categoryOutputPort = restClient.getNiFiRestClient().processGroups()
                        .createOutputPort(categoryProcessGroupId, portDTO);
                niFiObjectCache.addCategoryOutputPort(categoryProcessGroupId, categoryOutputPort);
                stopwatch.stop();
                log.debug("Time to create categoryOutputPort {} : {} ", categoryOutputPortName,
                        stopwatch.elapsed(TimeUnit.MILLISECONDS));
                stopwatch.reset();

            }
            stopwatch.start();
            Set<PortDTO> feedOutputPorts = feedProcessGroup.getContents().getOutputPorts();
            String feedOutputPortName = port.getOutputPortName();
            if (feedOutputPorts == null || feedOutputPorts.isEmpty()) {
                feedOutputPorts = restClient.getNiFiRestClient().processGroups()
                        .getOutputPorts(feedProcessGroup.getId());
            }
            PortDTO feedOutputPort = NifiConnectionUtil.findPortMatchingName(feedOutputPorts,
                    feedOutputPortName);
            stopwatch.stop();
            log.debug("Time to create feedOutputPort {} : {} ", feedOutputPortName,
                    stopwatch.elapsed(TimeUnit.MILLISECONDS));
            stopwatch.reset();
            if (feedOutputPort != null) {
                stopwatch.start();
                //make the connection from the feed output port to the category output port
                ConnectionDTO feedOutputToCategoryOutputConnection = niFiObjectCache.getConnection(
                        categoryProcessGroupId, feedOutputPort.getId(), categoryOutputPort.getId());
                stopwatch.stop();
                log.debug("Time to get feedOutputToCategoryOutputConnection: {} ",
                        stopwatch.elapsed(TimeUnit.MILLISECONDS));
                stopwatch.reset();
                if (feedOutputToCategoryOutputConnection == null) {
                    stopwatch.start();
                    //CONNECT FEED OUTPUT PORT TO THE Category output port
                    ConnectableDTO source = new ConnectableDTO();
                    source.setGroupId(feedProcessGroupId);
                    source.setId(feedOutputPort.getId());
                    source.setName(feedProcessGroupName);
                    source.setType(NifiConstants.NIFI_PORT_TYPE.OUTPUT_PORT.name());
                    ConnectableDTO dest = new ConnectableDTO();
                    dest.setGroupId(categoryProcessGroupId);
                    dest.setName(categoryOutputPort.getName());
                    dest.setId(categoryOutputPort.getId());
                    dest.setType(NifiConstants.NIFI_PORT_TYPE.OUTPUT_PORT.name());
                    feedOutputToCategoryOutputConnection = restClient.createConnection(categoryProcessGroupId,
                            source, dest);
                    niFiObjectCache.addConnection(categoryProcessGroupId, feedOutputToCategoryOutputConnection);
                    nifiFlowCache.addConnectionToCache(feedOutputToCategoryOutputConnection);
                    stopwatch.stop();
                    log.debug("Time to create feedOutputToCategoryOutputConnection: {} ",
                            stopwatch.elapsed(TimeUnit.MILLISECONDS));
                    stopwatch.reset();
                }

                stopwatch.start();
                //connection made on parent (root) to reusable template
                ConnectionDTO categoryToReusableTemplateConnection = niFiObjectCache.getConnection(
                        categoryProcessGroup.getParentGroupId(), categoryOutputPort.getId(),
                        reusableTemplatePort.getId());
                stopwatch.stop();
                log.debug("Time to get categoryToReusableTemplateConnection: {} ",
                        stopwatch.elapsed(TimeUnit.MILLISECONDS));
                stopwatch.reset();
                //Now connect the category ProcessGroup to the global template
                if (categoryToReusableTemplateConnection == null) {
                    stopwatch.start();
                    ConnectableDTO categorySource = new ConnectableDTO();
                    categorySource.setGroupId(categoryProcessGroupId);
                    categorySource.setId(categoryOutputPort.getId());
                    categorySource.setName(categoryOutputPortName);
                    categorySource.setType(NifiConstants.NIFI_PORT_TYPE.OUTPUT_PORT.name());
                    ConnectableDTO categoryToGlobalTemplate = new ConnectableDTO();
                    categoryToGlobalTemplate.setGroupId(reusableTemplateCategoryGroupId);
                    categoryToGlobalTemplate.setId(reusableTemplatePort.getId());
                    categoryToGlobalTemplate.setName(reusableTemplatePort.getName());
                    categoryToGlobalTemplate.setType(NifiConstants.NIFI_PORT_TYPE.INPUT_PORT.name());
                    categoryToReusableTemplateConnection = restClient.createConnection(categoryParentGroupId,
                            categorySource, categoryToGlobalTemplate);
                    niFiObjectCache.addConnection(categoryParentGroupId, categoryToReusableTemplateConnection);
                    nifiFlowCache.addConnectionToCache(categoryToReusableTemplateConnection);
                    stopwatch.stop();
                    log.debug("Time to create categoryToReusableTemplateConnection: {} ",
                            stopwatch.elapsed(TimeUnit.MILLISECONDS));
                    stopwatch.reset();
                }
            }

        }
        totalStopWatch.stop();
        log.debug("Time to connect feed to {} port. ElapsedTime: {} ", port.getInputPortName(),
                totalStopWatch.elapsed(TimeUnit.MILLISECONDS));
        totalStopWatch.reset();
    }

}

From source file:org.sleuthkit.autopsy.timeline.events.db.EventDB.java

/**
 * count all the events with the given options and return a map organizing
 * the counts in a hierarchy from date > event type > count
 *
 *
 * @param startTime events before this time will be excluded (seconds from
 *                  unix epoch)
 * @param endTime   events at or after this time will be excluded (seconds
 *                  from unix epoch)
 * @param filter    only events that pass this filter will be counted
 * @param zoomLevel only events of this type or a subtype will be counted
 *                  and the counts will be organized into bins for each of the subtypes of
 *                  the given event type
 *
 * @return a map organizing the counts in a hierarchy from date > event type >
 *         count
 */
private Map<EventType, Long> countEvents(Long startTime, Long endTime, Filter filter,
        EventTypeZoomLevel zoomLevel) {
    if (Objects.equals(startTime, endTime)) {
        endTime++;
    }

    Map<EventType, Long> typeMap = new HashMap<>();

    //do we want the root or subtype column of the database
    final boolean useSubTypes = (zoomLevel == EventTypeZoomLevel.SUB_TYPE);

    //get some info about the range of dates requested
    final String queryString = "select count(*), " + (useSubTypes ? SUB_TYPE_COLUMN : BASE_TYPE_COLUMN) // NON-NLS
            + " from events where time >= " + startTime + " and time < " + endTime + " and "
            + getSQLWhere(filter) // NON-NLS
            + " GROUP BY " + (useSubTypes ? SUB_TYPE_COLUMN : BASE_TYPE_COLUMN); // NON-NLS

    ResultSet rs = null;
    dbReadLock();
    //System.out.println(queryString);
    try (Statement stmt = con.createStatement();) {
        Stopwatch stopwatch = new Stopwatch();
        stopwatch.start();
        rs = stmt.executeQuery(queryString);
        stopwatch.stop();
        // System.out.println(stopwatch.elapsedMillis() / 1000.0 + " seconds");
        while (rs.next()) {

            EventType type = useSubTypes ? RootEventType.allTypes.get(rs.getInt(SUB_TYPE_COLUMN))
                    : BaseTypes.values()[rs.getInt(BASE_TYPE_COLUMN)];

            typeMap.put(type, rs.getLong("count(*)")); // NON-NLS
        }

    } catch (Exception ex) {
        LOGGER.log(Level.SEVERE, "error getting count of events from db.", ex); // NON-NLS
    } finally {
        try {
            rs.close();
        } catch (SQLException ex) {
            Exceptions.printStackTrace(ex);
        }
        dbReadUnlock();
    }
    return typeMap;
}

From source file:com.facebook.buck.distributed.DistBuildArtifactCacheImpl.java

@Override
public synchronized void prewarmRemoteContains(ImmutableSet<BuildRule> rulesToBeChecked) {
    @SuppressWarnings("PMD.PrematureDeclaration")
    Stopwatch stopwatch = Stopwatch.createStarted();
    Set<BuildRule> unseenRules = rulesToBeChecked.stream()
            .filter(rule -> !remoteCacheContainsFutures.containsKey(rule)).collect(Collectors.toSet());

    if (unseenRules.isEmpty()) {
        return;
    }

    LOG.info("Checking remote cache for [%d] new rules.", unseenRules.size());
    Map<BuildRule, ListenableFuture<RuleKey>> rulesToKeys = Maps.asMap(unseenRules,
            rule -> ruleKeyCalculator.calculate(eventBus, rule));

    ListenableFuture<Map<RuleKey, CacheResult>> keysToCacheResultFuture = Futures
            .transformAsync(Futures.allAsList(rulesToKeys.values()), ruleKeys -> {
                LOG.info("Computing RuleKeys for %d new rules took %dms.", unseenRules.size(),
                        stopwatch.elapsed(TimeUnit.MILLISECONDS));
                stopwatch.reset();
                stopwatch.start();
                return multiContainsAsync(ruleKeys);
            }, executorService);

    Map<BuildRule, ListenableFuture<Boolean>> containsResultsForUnseenRules = Maps
            .asMap(unseenRules,
                    rule -> Futures.transform(keysToCacheResultFuture, keysToCacheResult -> Objects
                            .requireNonNull(keysToCacheResult.get(Futures.getUnchecked(rulesToKeys.get(rule))))
                            .getType().isSuccess(), MoreExecutors.directExecutor()));

    remoteCacheContainsFutures.putAll(containsResultsForUnseenRules);
    Futures.allAsList(containsResultsForUnseenRules.values())
            .addListener(() -> LOG.info("Checking the remote cache for %d rules took %dms.", unseenRules.size(),
                    stopwatch.elapsed(TimeUnit.MILLISECONDS)), MoreExecutors.directExecutor());
}

From source file:org.sleuthkit.autopsy.timeline.events.db.EventDB.java

/**
 * //TODO: update javadoc //TODO: split this into helper methods
 *
 * get a list of {@link AggregateEvent}s.
 *
 * The general algorithm is as follows:
 *
 * - get all aggregate events, via one db query.
 * - sort them into a map from (type, description)-> aggevent
 * - for each key in map, merge the events and accumulate them in a list
 * to return
 *
 *
 * @param timeRange the Interval within which all returned aggregate
 *                  events will be.
 * @param filter    only events that pass the filter will be included in
 *                  the aggregate events returned
 * @param zoomLevel only events of this level will be included
 * @param lod       description level of detail to use when grouping events
 *
 *
 * @return a list of aggregate events within the given time range that pass
 *         the supplied filter, aggregated according to the given event type and
 *         description zoom levels
 */
private List<AggregateEvent> getAggregatedEvents(Interval timeRange, Filter filter,
        EventTypeZoomLevel zoomLevel, DescriptionLOD lod) {
    String descriptionColumn = getDescriptionColumn(lod);
    final boolean useSubTypes = (zoomLevel.equals(EventTypeZoomLevel.SUB_TYPE));

    //get some info about the time range requested
    RangeDivisionInfo rangeInfo = RangeDivisionInfo.getRangeDivisionInfo(timeRange);
    //use 'rounded out' range
    long start = timeRange.getStartMillis() / 1000;//.getLowerBound();
    long end = timeRange.getEndMillis() / 1000;//Millis();//rangeInfo.getUpperBound();
    if (Objects.equals(start, end)) {
        end++;
    }

    //get a sqlite strftime format string
    String strfTimeFormat = getStrfTimeFormat(rangeInfo.getPeriodSize());

    //effectively map from type to (map from description to events)
    Map<EventType, SetMultimap<String, AggregateEvent>> typeMap = new HashMap<>();

    //get all aggregate events in this time unit
    dbReadLock();
    String query = "select strftime('" + strfTimeFormat + "',time , 'unixepoch'"
            + (TimeLineController.getTimeZone().get().equals(TimeZone.getDefault()) ? ", 'localtime'" : "")
            + ") as interval,  group_concat(event_id) as event_ids, Min(time), Max(time),  " + descriptionColumn
            + ", " + (useSubTypes ? SUB_TYPE_COLUMN : BASE_TYPE_COLUMN) // NON-NLS
            + " from events where time >= " + start + " and time < " + end + " and " + getSQLWhere(filter) // NON-NLS
            + " group by interval, " + (useSubTypes ? SUB_TYPE_COLUMN : BASE_TYPE_COLUMN) + " , "
            + descriptionColumn // NON-NLS
            + " order by Min(time)"; // NON-NLS
    //System.out.println(query);
    ResultSet rs = null;
    try (Statement stmt = con.createStatement(); // scoop up requested events in groups organized by interval, type, and description
    ) {

        Stopwatch stopwatch = new Stopwatch();
        stopwatch.start();

        rs = stmt.executeQuery(query);
        stopwatch.stop();
        //System.out.println(stopwatch.elapsedMillis() / 1000.0 + " seconds");
        while (rs.next()) {
            EventType type = useSubTypes ? RootEventType.allTypes.get(rs.getInt(SUB_TYPE_COLUMN))
                    : BaseTypes.values()[rs.getInt(BASE_TYPE_COLUMN)];

            AggregateEvent aggregateEvent = new AggregateEvent(
                    new Interval(rs.getLong("Min(time)") * 1000, rs.getLong("Max(time)") * 1000,
                            TimeLineController.getJodaTimeZone()), // NON-NLS
                    type, Arrays.asList(rs.getString("event_ids").split(",")), // NON-NLS
                    rs.getString(descriptionColumn), lod);

            //put events in map from type/description -> event
            SetMultimap<String, AggregateEvent> descrMap = typeMap.get(type);
            if (descrMap == null) {
                descrMap = HashMultimap.<String, AggregateEvent>create();
                typeMap.put(type, descrMap);
            }
            descrMap.put(aggregateEvent.getDescription(), aggregateEvent);
        }

    } catch (SQLException ex) {
        Exceptions.printStackTrace(ex);
    } finally {
        try {
            rs.close();
        } catch (SQLException ex) {
            Exceptions.printStackTrace(ex);
        }
        dbReadUnlock();
    }

    //result list to return
    ArrayList<AggregateEvent> aggEvents = new ArrayList<>();

    //save this for use when comparing gap size
    Period timeUnitLength = rangeInfo.getPeriodSize().getPeriod();

    //For each (type, description) key, merge agg events
    for (SetMultimap<String, AggregateEvent> descrMap : typeMap.values()) {
        for (String descr : descrMap.keySet()) {
            //run through the sorted events, merging together adjacent events
            Iterator<AggregateEvent> iterator = descrMap.get(descr).stream()
                    .sorted((AggregateEvent o1, AggregateEvent o2) -> Long
                            .compare(o1.getSpan().getStartMillis(), o2.getSpan().getStartMillis()))
                    .iterator();
            AggregateEvent current = iterator.next();
            while (iterator.hasNext()) {
                AggregateEvent next = iterator.next();
                Interval gap = current.getSpan().gap(next.getSpan());

                //if they overlap or the gap is less than one quarter of timeUnitLength
                //TODO: 1/4 factor is arbitrary. review! -jm
                if (gap == null || gap.toDuration()
                        .getMillis() <= timeUnitLength.toDurationFrom(gap.getStart()).getMillis() / 4) {
                    //merge them
                    current = AggregateEvent.merge(current, next);
                } else {
                    //done merging into current, set next as new current
                    aggEvents.add(current);
                    current = next;
                }
            }
            aggEvents.add(current);
        }
    }

    //at this point we should have a list of aggregate events.
    //one per type/description spanning consecutive time units as determined in rangeInfo
    return aggEvents;
}

From source file:com.twitter.hraven.datasource.AppSummaryService.java

/**
 * Gets a list of all apps in the specified time frame from the aggregate tables.
 * @param cluster
 * @param user
 * @param startTime
 * @param endTime
 * @param limit
 * @return {@link List < AppSummary >}
 * @throws IOException
 */
public List<AppSummary> getAllApps(String cluster, String user, long startTime, long endTime, int limit)
        throws IOException {
    // set the time to top of the day minus 1 to make sure that timestamp is included
    long topDayEndTime = Long.MAX_VALUE - getTimestamp(endTime, AggregationConstants.AGGREGATION_TYPE.DAILY)
            - 1;
    // set the time to top of the day plus 1 to make sure that timestamp is included
    long topDayStartTime = Long.MAX_VALUE - getTimestamp(startTime, AggregationConstants.AGGREGATION_TYPE.DAILY)
            + 1;

    byte[] startRow = ByteUtil.join(Constants.SEP_BYTES, Bytes.toBytes(cluster), Bytes.toBytes(topDayEndTime));
    byte[] endRow = ByteUtil.join(Constants.SEP_BYTES, Bytes.toBytes(cluster), Bytes.toBytes(topDayStartTime));

    // start scanning agg table at cluster!inv timestamp![user]
    Scan scan = new Scan();

    if (StringUtils.isNotBlank(user)) {
        startRow = ByteUtil.join(Constants.SEP_BYTES, startRow, Bytes.toBytes(user));
        endRow = ByteUtil.join(Constants.SEP_BYTES, endRow, Bytes.toBytes(user));
        FilterList filters = new FilterList(FilterList.Operator.MUST_PASS_ALL);
        filters.addFilter(new SingleColumnValueFilter(Constants.INFO_FAM_BYTES, AggregationConstants.USER_BYTES,
                CompareFilter.CompareOp.EQUAL, Bytes.toBytes(user)));
        scan.setFilter(filters);
    }
    scan.setStartRow(startRow);
    scan.setStopRow(endRow);
    LOG.info(" scan is " + scan.toJSON());

    Map<AppKey, AppSummary> amap = new HashMap<AppKey, AppSummary>();
    Stopwatch apptimer = new Stopwatch();

    ResultScanner scanner = null;
    try {
        Stopwatch timer = new Stopwatch().start();
        int rowCount = 0;
        long colCount = 0;
        long resultSize = 0;
        scanner = aggDailyTable.getScanner(scan);
        for (Result result : scanner) {
            if (result != null && !result.isEmpty()) {
                rowCount++;
                colCount += result.size();
                resultSize += result.getWritableSize();
                apptimer.start();
                byte[] rowKey = result.getRow();
                AppAggregationKey appAggKey = aggConv.fromBytes(rowKey);
                AppKey ak = new AppKey(cluster, appAggKey.getUserName(), appAggKey.getAppId());
                AppSummary as1 = null;
                if (amap.containsKey(ak)) {
                    as1 = amap.get(ak);
                } else {
                    as1 = new AppSummary(ak);
                    as1.setFirstRunId(appAggKey.getAggregationId());
                    as1.setLastRunId(appAggKey.getAggregationId());
                }
                if (appAggKey.getAggregationId() < as1.getFirstRunId()) {
                    as1.setFirstRunId(appAggKey.getAggregationId());
                }
                if (appAggKey.getAggregationId() > as1.getLastRunId()) {
                    as1.setLastRunId(appAggKey.getAggregationId());
                }
                amap.put(ak, populateAppSummary(result, as1));
                if (amap.size() >= limit) {
                    break;
                }
                apptimer.stop();
            }
        }
        timer.stop();
        LOG.info(" Fetched from hbase " + rowCount + " rows, " + colCount + " columns, " + resultSize
                + " bytes ( " + resultSize / (1024 * 1024) + ") MB, in \n total timer of " + timer
                + " elapsedMillis:" + timer.elapsed(TimeUnit.MILLISECONDS)
                + " that includes \n appSummary population timer of " + apptimer + " elapsedMillis"
                + apptimer.elapsed(TimeUnit.MILLISECONDS) + " \n hbase scan time is "
                + (timer.elapsed(TimeUnit.MILLISECONDS) - apptimer.elapsed(TimeUnit.MILLISECONDS)));
    } finally {
        if (scanner != null) {
            scanner.close();
        }
    }
    LOG.info("Number of distinct apps " + amap.size());
    return new ArrayList<AppSummary>(amap.values());
}

From source file:org.n52.lod.csw.CSWLoDEnabler.java

private void async(final int startPos, final long recordCount, final Stopwatch overallTimer,
        final Stopwatch retrievingTimer, final Stopwatch mappingTimer, final TripleSink serverSink,
        final TripleSink fileSink) {
    // processing queue
    final ConcurrentLinkedQueue<Map<String, GetRecordByIdResponseDocument>> queue = Queues
            .newConcurrentLinkedQueue();

    // main loop download - producer
    ExecutorService downloadExecutor = Executors.newSingleThreadExecutor();
    downloadExecutor.submit(new Runnable() {

        private final Logger logger = LoggerFactory.getLogger("Download Runnable");

        @Override
        public void run() {
            int i = startPos;
            while (i < recordCount) {
                retrievingTimer.start();
                // Map<String, GetRecordByIdResponseDocument> records =
                // retrieveRecords(i, NUMBER_OF_RECORDS_PER_ITERATION,
                // recordCount);
                Map<String, GetRecordByIdResponseDocument> records = retrieveRecordsThreaded(i,
                        NUMBER_OF_RECORDS_PER_ITERATION, recordCount);
                queue.add(records);
                retrievingTimer.stop();

                i = i + NUMBER_OF_RECORDS_PER_ITERATION;
                logger.debug("Finished intermediate download run at {}", overallTimer.toString());
                logger.info("Retrieved {} records, queue size is now {}", records.size(), queue.size());
            } // end of main loop

            logger.trace("Done - adding the poison pill!");
            queue.add(POISON_PILL);
        }
    });

    // consumer
    ExecutorService mapExecutor = Executors.newSingleThreadExecutor();
    mapExecutor.submit(new Runnable() {

        private final Logger logger = LoggerFactory.getLogger("Map Runnable");

        private boolean isRunning = true;

        @Override
        public void run() {
            while (isRunning) {
                try {
                    Thread.sleep(100);
                } catch (InterruptedException e) {
                    logger.error("Error sleeping in mapping runnable", e);
                }

                try {
                    Map<String, GetRecordByIdResponseDocument> records = queue.poll();

                    if (records == null)
                        continue;

                    if (records == POISON_PILL) {
                        queue.add(POISON_PILL); // notify other threads to
                                                // stop
                        isRunning = false;
                        logger.trace("Got the poison pill!");
                        return;
                    }

                    // process queueElement
                    mappingTimer.start();
                    if (addToServer && serverSink != null)
                        serverSink.addRecords(records, report);
                    if (saveToFile && fileSink != null)
                        fileSink.addRecords(records, report);
                    mappingTimer.stop();

                    logger.debug("Finished intermediate run at {}", overallTimer.toString());

                } catch (RuntimeException e) {
                    logger.error("Error in mapping runnable", e);
                }
            } // end of main loop
        }
    });

    downloadExecutor.shutdown();
    try {
        downloadExecutor.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
    } catch (InterruptedException e) {
        log.error("during shut down of download executor", e);
    }
    mapExecutor.shutdown();
    try {
        mapExecutor.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
    } catch (InterruptedException e) {
        log.error("during shut down of map executor", e);
    }
}

From source file:com.thinkbiganalytics.feedmgr.nifi.CreateFeedBuilder.java

/**
 * Updates a process group's properties
 */
private void updateProcessGroupProperties(String processGroupId, String processGroupName)
        throws FeedCreationException {
    Stopwatch stopwatch = Stopwatch.createStarted();
    List<NifiProperty> propertiesToUpdate = restClient.getPropertiesForProcessGroup(processGroupId);
    stopwatch.stop();
    log.debug("Time to get Properties in Feed updateProcessGroupProperties: {} ms",
            stopwatch.elapsed(TimeUnit.MILLISECONDS));

    stopwatch.reset();
    stopwatch.start();
    //get the Root processGroup
    ProcessGroupDTO rootProcessGroup = niFiObjectCache.getRootProcessGroup();
    stopwatch.stop();
    log.debug("Time to get root Process Group in updateProcessGroupProperties: {} ms",
            stopwatch.elapsed(TimeUnit.MILLISECONDS));
    stopwatch.reset();

    stopwatch.start();
    modifiedProperties = new ArrayList<>();
    //resolve the static properties
    //first fill in any properties with static references
    List<NifiProperty> modifiedStaticProperties = propertyExpressionResolver
            .resolveStaticProperties(propertiesToUpdate);
    // now apply any of the incoming metadata properties to this

    List<NifiProperty> modifiedFeedMetadataProperties = NifiPropertyUtil.matchAndSetPropertyValues(
            rootProcessGroup.getName(), processGroupName, propertiesToUpdate, properties);
    modifiedProperties.addAll(modifiedStaticProperties);
    modifiedProperties.addAll(modifiedFeedMetadataProperties);

    stopwatch.stop();
    log.debug("Time to set modifiedProperties: {} ms", stopwatch.elapsed(TimeUnit.MILLISECONDS));
    stopwatch.reset();

    stopwatch.start();
    restClient.updateProcessGroupProperties(modifiedProperties);
    stopwatch.stop();
    log.debug("Time to update properties in the process group: {} ms",
            stopwatch.elapsed(TimeUnit.MILLISECONDS));

}

From source file:org.apache.rocketmq.console.task.DashboardCollectTask.java

@Scheduled(cron = "30 0/1 * * * ?")
@MultiMQAdminCmdMethod(timeoutMillis = 5000)
public void collectTopic() {
    if (!rmqConfigure.isEnableDashBoardCollect()) {
        return;
    }
    Date date = new Date();
    Stopwatch stopwatch = Stopwatch.createStarted();
    try {
        TopicList topicList = mqAdminExt.fetchAllTopicList();
        Set<String> topicSet = topicList.getTopicList();
        for (String topic : topicSet) {
            if (topic.startsWith(MixAll.RETRY_GROUP_TOPIC_PREFIX)
                    || topic.startsWith(MixAll.DLQ_GROUP_TOPIC_PREFIX)) {
                continue;
            }

            TopicRouteData topicRouteData = mqAdminExt.examineTopicRouteInfo(topic);

            GroupList groupList = mqAdminExt.queryTopicConsumeByWho(topic);

            double inTPS = 0;

            long inMsgCntToday = 0;

            double outTPS = 0;

            long outMsgCntToday = 0;

            for (BrokerData bd : topicRouteData.getBrokerDatas()) {
                String masterAddr = bd.getBrokerAddrs().get(MixAll.MASTER_ID);
                if (masterAddr != null) {
                    try {
                        stopwatch.start();
                        log.info("start time: {}", stopwatch.toString());
                        BrokerStatsData bsd = mqAdminExt.viewBrokerStatsData(masterAddr,
                                BrokerStatsManager.TOPIC_PUT_NUMS, topic);
                        stopwatch.stop();
                        log.info("stop time : {}", stopwatch.toString());
                        stopwatch.reset();
                        inTPS += bsd.getStatsMinute().getTps();
                        inMsgCntToday += StatsAllSubCommand.compute24HourSum(bsd);
                    } catch (Exception e) {
                        //                            throw Throwables.propagate(e);
                    }
                }
            }

            if (groupList != null && !groupList.getGroupList().isEmpty()) {

                for (String group : groupList.getGroupList()) {
                    for (BrokerData bd : topicRouteData.getBrokerDatas()) {
                        String masterAddr = bd.getBrokerAddrs().get(MixAll.MASTER_ID);
                        if (masterAddr != null) {
                            try {
                                String statsKey = String.format("%s@%s", topic, group);
                                BrokerStatsData bsd = mqAdminExt.viewBrokerStatsData(masterAddr,
                                        BrokerStatsManager.GROUP_GET_NUMS, statsKey);
                                outTPS += bsd.getStatsMinute().getTps();
                                outMsgCntToday += StatsAllSubCommand.compute24HourSum(bsd);
                            } catch (Exception e) {
                                //                                    throw Throwables.propagate(e);
                            }
                        }
                    }
                }
            }

            List<String> list;
            try {
                list = dashboardCollectService.getTopicMap().get(topic);
            } catch (ExecutionException e) {
                throw Throwables.propagate(e);
            }
            if (null == list) {
                list = Lists.newArrayList();
            }

            list.add(date.getTime() + "," + new BigDecimal(inTPS).setScale(5, BigDecimal.ROUND_HALF_UP) + ","
                    + inMsgCntToday + "," + new BigDecimal(outTPS).setScale(5, BigDecimal.ROUND_HALF_UP) + ","
                    + outMsgCntToday);
            dashboardCollectService.getTopicMap().put(topic, list);

        }

        log.debug("Topic Collected Data in memory = {}"
                + JsonUtil.obj2String(dashboardCollectService.getTopicMap().asMap()));
    } catch (Exception err) {
        throw Throwables.propagate(err);
    }
}