Example usage for com.google.common.base Stopwatch Stopwatch

Introduction

On this page you can find example usage for the com.google.common.base.Stopwatch no-argument constructor, Stopwatch(), collected from open source projects.

Prototype

Stopwatch() 
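
Note: the no-argument constructor creates an unstarted stopwatch. Newer Guava releases deprecate (and eventually remove) this public constructor in favor of the static factories Stopwatch.createStarted() and Stopwatch.createUnstarted(). Below is a minimal sketch of the modern equivalent of the new Stopwatch().start() idiom used throughout the examples on this page:

import java.util.concurrent.TimeUnit;

import com.google.common.base.Stopwatch;

public class StopwatchConstructorExample {
    public static void main(String[] args) throws InterruptedException {
        // The examples on this page use the older form:
        //     Stopwatch timer = new Stopwatch().start();
        // With a current Guava on the classpath the equivalent is:
        Stopwatch started = Stopwatch.createStarted();    // created and already running
        Stopwatch idle = Stopwatch.createUnstarted();     // created, call start() yourself

        Thread.sleep(50);
        started.stop();

        System.out.println(started.elapsed(TimeUnit.MILLISECONDS) + " ms");
        System.out.println(started);                      // toString() is human readable, e.g. "51.2 ms"
        System.out.println("idle running? " + idle.isRunning());
    }
}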

Usage

From source file:com.twitter.hraven.rest.RestJSONResource.java

@GET
@Path("hdfs/{cluster}/")
@Produces(MediaType.APPLICATION_JSON)
public List<HdfsStats> getHdfsStats(@PathParam("cluster") String cluster,
        // run Id is timestamp in seconds
        @QueryParam("timestamp") long runid, @QueryParam("path") String pathPrefix,
        @QueryParam("limit") int limit) throws IOException {
    if (limit == 0) {
        limit = HdfsConstants.RECORDS_RETURNED_LIMIT;
    }

    boolean noRunId = false;
    if (runid == 0L) {
        // default it to 2 hours back
        long lastHour = System.currentTimeMillis() - 2 * 3600000L;
        // convert milliseconds to seconds
        runid = lastHour / 1000L;
        noRunId = true;
    }

    LOG.info(String.format("Fetching hdfs stats for cluster=%s, path=%s limit=%d, runId=%d", cluster,
            pathPrefix, limit, runid));
    Stopwatch timer = new Stopwatch().start();
    serializationContext.set(new SerializationContext(SerializationContext.DetailLevel.EVERYTHING));
    List<HdfsStats> hdfsStats = getHdfsStatsService().getAllDirs(cluster, pathPrefix, limit, runid);
    timer.stop();
    /**
     * If we find NO hdfs stats for the default timestamp, consider the case
     * where no runId was passed in: the user expects a default response and
     * we set the default runId to 2 hours back (as above). But what if there
     * was an error in collection at that time? Hence we try to look back
     * for some older runIds.
     */
    if (hdfsStats == null || hdfsStats.size() == 0L) {
        if (noRunId) {
            // consider reading the daily aggregation table instead of hourly
            // or consider reading older data since runId was a default timestamp
            int retryCount = 0;
            while (retryCount < HdfsConstants.ageMult.length) {
                runid = HdfsStatsService.getOlderRunId(retryCount, runid);
                hdfsStats = getHdfsStatsService().getAllDirs(cluster, pathPrefix, limit, runid);
                if ((hdfsStats != null) && (hdfsStats.size() != 0L)) {
                    break;
                }
                retryCount++;
            }
        }
    }

    // export latency metrics
    HravenResponseMetrics.HDFS_STATS_API_LATENCY_VALUE.set(timer.elapsed(TimeUnit.MILLISECONDS));
    return hdfsStats;
}
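
The pattern above is: start a stopwatch, run the lookup, stop, then publish the elapsed milliseconds to a latency gauge. A minimal, self-contained sketch of that shape, where fetchStats() and the AtomicLong gauge are hypothetical stand-ins for getHdfsStatsService().getAllDirs(...) and HravenResponseMetrics.HDFS_STATS_API_LATENCY_VALUE (assuming a Guava version where the public Stopwatch constructor still exists):

import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;

import com.google.common.base.Stopwatch;

public class LatencyGaugeExample {

    // Hypothetical stand-in for HravenResponseMetrics.HDFS_STATS_API_LATENCY_VALUE
    private static final AtomicLong LATENCY_MILLIS = new AtomicLong();

    // Hypothetical stand-in for getHdfsStatsService().getAllDirs(...)
    static String fetchStats() throws InterruptedException {
        Thread.sleep(25);
        return "hdfs stats";
    }

    public static void main(String[] args) throws InterruptedException {
        Stopwatch timer = new Stopwatch().start();   // Stopwatch.createStarted() on newer Guava
        String result = fetchStats();
        timer.stop();

        // Export the latency once, after the timer has been stopped.
        LATENCY_MILLIS.set(timer.elapsed(TimeUnit.MILLISECONDS));
        System.out.println(result + " fetched in " + LATENCY_MILLIS.get() + " ms");
    }
}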

From source file:uk.ac.open.kmi.iserve.sal.manager.impl.ConcurrentSparqlGraphStoreManager.java

@Override
public Multimap<URI, URI> listResourcesMapByQuery(String queryStr, String variableNameA, String variableNameB) {
    Multimap<URI, URI> result = HashMultimap.create();
    // If the SPARQL endpoint does not exist return immediately.
    if (this.getSparqlQueryEndpoint() == null || queryStr == null || queryStr.isEmpty()) {
        return result;
    }

    // Query the engine
    log.debug("Evaluating SPARQL query in Knowledge Base: \n {}", queryStr);
    Query query = QueryFactory.create(queryStr);
    QueryExecution qe = QueryExecutionFactory.sparqlService(this.getSparqlQueryEndpoint().toASCIIString(),
            query);
    MonitoredQueryExecution qexec = new MonitoredQueryExecution(qe);

    try {
        Stopwatch stopwatch = new Stopwatch();
        stopwatch.start();

        ResultSet qResults = qexec.execSelect();

        stopwatch.stop();
        log.debug("Time taken for querying the registry: {}", stopwatch);

        Resource resourceA;
        Resource resourceB;
        // Iterate over the results obtained
        while (qResults.hasNext()) {
            QuerySolution soln = qResults.nextSolution();

            // Get the match URL
            resourceA = soln.getResource(variableNameA);
            resourceB = soln.getResource(variableNameB);

            if (resourceA != null && resourceA.isURIResource() && resourceB != null
                    && resourceB.isURIResource()) {
                result.put(new URI(resourceA.getURI()), new URI(resourceB.getURI()));
            } else {
                log.warn("Skipping result as the URL is null");
                break;
            }
        }
    } catch (URISyntaxException e) {
        log.error("Error obtaining match result. Expected a correct URI", e);
    } finally {
        qexec.close();
    }
    return result;
}
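
Here the stopwatch is constructed unstarted, started explicitly just before the query runs, and then handed to the logger as-is; Stopwatch overrides toString() with a human-readable duration, which is what the {} placeholder in the log statement prints. A minimal sketch of that pattern, with runQuery() as a hypothetical stand-in for qexec.execSelect():

import com.google.common.base.Stopwatch;

public class QueryTimingExample {

    // Hypothetical stand-in for qexec.execSelect()
    static int runQuery() throws InterruptedException {
        Thread.sleep(30);
        return 42;
    }

    public static void main(String[] args) throws InterruptedException {
        // Construct unstarted, then start explicitly, as in the example above.
        // (On newer Guava, use Stopwatch.createUnstarted() instead of the constructor.)
        Stopwatch stopwatch = new Stopwatch();
        stopwatch.start();

        int rows = runQuery();

        stopwatch.stop();
        // toString() renders something like "30.21 ms"
        System.out.println("Time taken for querying: " + stopwatch + " (" + rows + " rows)");
    }
}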

From source file:cosmos.impl.CosmosImpl.java

@Override
public CloseableIterable<Entry<RecordValue<?>, Long>> groupResults(Store id, Column column)
        throws TableNotFoundException, UnexpectedStateException, UnindexedColumnException {
    checkNotNull(id);

    Stopwatch sw = new Stopwatch().start();
    final String description = "Cosmos:groupResults";

    try {
        State s = PersistedStores.getState(id);

        if (!State.LOADING.equals(s) && !State.LOADED.equals(s)) {
            sw.stop();
            throw unexpectedState(id, new State[] { State.LOADING, State.LOADED }, s);
        }

        checkNotNull(column);

        Text colf = new Text(column.name());

        BatchScanner bs = id.connector().createBatchScanner(id.dataTable(), id.auths(), id.readThreads());
        bs.setRanges(Collections.singleton(Range.prefix(id.uuid())));
        bs.fetchColumnFamily(colf);

        // Filter on cq-prefix to only look at the ordering we want
        IteratorSetting filter = new IteratorSetting(50, "cqFilter", OrderFilter.class);
        filter.addOption(OrderFilter.PREFIX, Order.FORWARD);
        bs.addScanIterator(filter);

        IteratorSetting cfg = new IteratorSetting(60, GroupByRowSuffixIterator.class);
        bs.addScanIterator(cfg);

        return CloseableIterable.transform(bs, new GroupByFunction(), id.tracer(), description, sw);
    } catch (TableNotFoundException e) {
        // In the exceptional case, stop the timer
        sw.stop();
        id.tracer().addTiming(description, sw.elapsed(TimeUnit.MILLISECONDS));
        throw e;
    } catch (UnexpectedStateException e) {
        // In the exceptional case, stop the timer
        sw.stop();
        id.tracer().addTiming(description, sw.elapsed(TimeUnit.MILLISECONDS));
        throw e;
    } catch (RuntimeException e) {
        // In the exceptional case, stop the timer
        sw.stop();
        id.tracer().addTiming(description, sw.elapsed(TimeUnit.MILLISECONDS));
        throw e;
    }
    // no finally as the trace is stopped by the CloseableIterable
}
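
On the success path the stopwatch above is deliberately left running (per the closing comment, it is stopped later by the CloseableIterable), while every catch block stops it and records the elapsed milliseconds before rethrowing. A minimal sketch of that exception-path timing, where the Tracer interface is a hypothetical stand-in for id.tracer():

import java.util.concurrent.TimeUnit;

import com.google.common.base.Stopwatch;

public class ExceptionTimingExample {

    /** Hypothetical stand-in for the tracer behind id.tracer().addTiming(...). */
    interface Tracer {
        void addTiming(String description, long millis);
    }

    static String doScan() {
        throw new IllegalStateException("simulated scan failure");
    }

    static String timedScan(Tracer tracer) {
        Stopwatch sw = new Stopwatch().start();   // Stopwatch.createStarted() on newer Guava
        final String description = "Example:groupResults";
        try {
            // On success the stopwatch stays running and is stopped by the caller later.
            return doScan();
        } catch (RuntimeException e) {
            // In the exceptional case, stop the timer and record what was measured so far.
            sw.stop();
            tracer.addTiming(description, sw.elapsed(TimeUnit.MILLISECONDS));
            throw e;
        }
    }

    public static void main(String[] args) {
        Tracer tracer = (description, millis) ->
                System.out.println(description + " failed after " + millis + " ms");
        try {
            timedScan(tracer);
        } catch (IllegalStateException expected) {
            System.out.println("caught: " + expected.getMessage());
        }
    }
}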

From source file:org.rhq.enterprise.server.measurement.MeasurementDataManagerBean.java

@Override
public MeasurementAggregate getMeasurementAggregate(Subject subject, int scheduleId, long startTime,
        long endTime) {
    Stopwatch stopwatch = new Stopwatch().start();
    try {
        MeasurementScheduleCriteria criteria = new MeasurementScheduleCriteria();
        criteria.addFilterId(scheduleId);
        criteria.fetchResource(true);

        PageList<MeasurementSchedule> schedules = measurementScheduleManager
                .findSchedulesByCriteria(subjectManager.getOverlord(), criteria);
        if (schedules.isEmpty()) {
            throw new MeasurementException(
                    "Could not find MeasurementSchedule with the id[" + scheduleId + "]");
        }
        MeasurementSchedule schedule = schedules.get(0);

        if (authorizationManager.canViewResource(subject, schedule.getResource().getId()) == false) {
            throw new PermissionException("User[" + subject.getName()
                    + "] does not have permission to view schedule[id=" + scheduleId + "]");
        }

        if (schedule.getDefinition().getDataType() != DataType.MEASUREMENT) {
            throw new IllegalArgumentException(
                    schedule + " is not about numerical values. Can't compute aggregates");
        }

        if (startTime > endTime) {
            throw new IllegalArgumentException("Start date " + startTime + " is not before " + endTime);
        }

        MetricsServer metricsServer = storageClientManager.getMetricsServer();
        AggregateNumericMetric summary = metricsServer.getSummaryAggregate(scheduleId, startTime, endTime);

        return new MeasurementAggregate(summary.getMin(), summary.getAvg(), summary.getMax());
    } finally {
        stopwatch.stop();
        log.debug("Finished loading measurement aggregate in " + stopwatch.elapsed(TimeUnit.MILLISECONDS));
    }
}
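
The try/finally shape above guarantees that the stopwatch is stopped and the duration logged whether the aggregate is computed or an exception is thrown. A stripped-down sketch of the same structure, with computeAggregate() as a hypothetical stand-in for the schedule lookup and metrics call:

import java.util.concurrent.TimeUnit;

import com.google.common.base.Stopwatch;

public class TryFinallyTimingExample {

    // Hypothetical stand-in for the schedule lookup and aggregate computation
    static double computeAggregate(boolean fail) throws InterruptedException {
        Thread.sleep(20);
        if (fail) {
            throw new IllegalArgumentException("bad schedule id");
        }
        return 3.14;
    }

    static double timedAggregate(boolean fail) throws InterruptedException {
        Stopwatch stopwatch = new Stopwatch().start();   // Stopwatch.createStarted() on newer Guava
        try {
            return computeAggregate(fail);
        } finally {
            // Runs on both the success and the exception path.
            stopwatch.stop();
            System.out.println("Finished loading measurement aggregate in "
                    + stopwatch.elapsed(TimeUnit.MILLISECONDS) + " ms");
        }
    }

    public static void main(String[] args) throws InterruptedException {
        System.out.println("value = " + timedAggregate(false));
        try {
            timedAggregate(true);
        } catch (IllegalArgumentException expected) {
            System.out.println("caught: " + expected.getMessage());
        }
    }
}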

From source file:com.twitter.hraven.rest.RestJSONResource.java

@GET
@Path("hdfs/path/{cluster}/")
@Produces(MediaType.APPLICATION_JSON)
public List<HdfsStats> getHdfsPathTimeSeriesStats(@PathParam("cluster") String cluster,
        @QueryParam("path") String path, @QueryParam("starttime") long starttime,
        @QueryParam("endtime") long endtime, @QueryParam("limit") int limit) throws IOException {
    if (StringUtils.isEmpty(path)) {
        throw new RuntimeException("Required query param missing: path ");
    }

    if (limit == 0) {
        limit = HdfsConstants.RECORDS_RETURNED_LIMIT;
    }

    if (starttime == 0L) {
        // default it to current hour's top
        long lastHour = System.currentTimeMillis();
        // convert milliseconds to seconds
        starttime = lastHour / 1000L;
    }

    if (endtime == 0L) {
        // default it to one week ago
        endtime = starttime - 7 * 86400;
    }

    if (endtime > starttime) {
        throw new RuntimeException("Ensure endtime " + endtime + " is older than starttime " + starttime);
    }

    LOG.info(String.format(
            "Fetching hdfs timeseries stats for cluster=%s, path=%s limit=%d, starttime=%d endtime=%d", cluster,
            path, limit, starttime, endtime));
    Stopwatch timer = new Stopwatch().start();
    List<HdfsStats> hdfsStats = getHdfsStatsService().getHdfsTimeSeriesStats(cluster, path, limit, starttime,
            endtime);
    timer.stop();

    if (hdfsStats != null) {
        LOG.info("For hdfs/path/{cluster}/{attribute} with input query " + "hdfs/path/" + cluster + "?limit="
                + limit + "&path=" + path + " fetched #number of HdfsStats " + hdfsStats.size() + " in "
                + timer);
    } else {
        LOG.info("For hdfs/path/{cluster}/{attribute} with input query " + "hdfs/path/" + cluster + "?limit="
                + limit + "&path=" + path + " fetched 0 HdfsStats in " + timer);
    }

    // export latency metrics
    HravenResponseMetrics.HDFS_TIMESERIES_API_LATENCY_VALUE.set(timer.elapsed(TimeUnit.MILLISECONDS));
    return hdfsStats;
}

From source file:com.twitter.hraven.rest.RestJSONResource.java

@GET
@Path("newJobs/{cluster}/")
@Produces(MediaType.APPLICATION_JSON)
public List<AppSummary> getNewJobs(@PathParam("cluster") String cluster, @QueryParam("user") String user,
        @QueryParam("startTime") long startTime, @QueryParam("endTime") long endTime,
        @QueryParam("limit") int limit) throws IOException {
    Stopwatch timer = new Stopwatch().start();

    if (limit == 0) {
        limit = Integer.MAX_VALUE;
    }
    if (startTime == 0L) {
        // 24 hours back
        startTime = System.currentTimeMillis() - Constants.MILLIS_ONE_DAY;
        // get top of the hour
        startTime -= (startTime % 3600000);
    }
    if (endTime == 0L) {
        // now
        endTime = System.currentTimeMillis();
        // get top of the hour
        endTime -= (endTime % 3600000);
    }

    LOG.info("Fetching new Jobs for cluster=" + cluster + " user=" + user + " startTime=" + startTime
            + " endTime=" + endTime);
    AppSummaryService as = getAppSummaryService();
    // get the row keys from AppVersions table via JobHistoryService
    List<AppSummary> newApps = as.getNewApps(getJobHistoryService(), StringUtils.trimToEmpty(cluster),
            StringUtils.trimToEmpty(user), startTime, endTime, limit);

    timer.stop();

    LOG.info("For newJobs/{cluster}/{user}/{appId}/ with input query " + "newJobs/" + cluster + SLASH + user
            + "?limit=" + limit + "&startTime=" + startTime + "&endTime=" + endTime + " fetched "
            + newApps.size() + " flows in " + timer);

    serializationContext
            .set(new SerializationContext(SerializationContext.DetailLevel.APP_SUMMARY_STATS_NEW_JOBS_ONLY));

    // export latency metrics
    HravenResponseMetrics.NEW_JOBS_API_LATENCY_VALUE.set(timer.elapsed(TimeUnit.MILLISECONDS));
    return newApps;
}

From source file:cosmos.impl.CosmosImpl.java

@Override
public void delete(Store id)
        throws TableNotFoundException, MutationsRejectedException, UnexpectedStateException {
    checkNotNull(id);

    Stopwatch sw = new Stopwatch().start();

    try {
        State s = PersistedStores.getState(id);

        if (!State.LOADING.equals(s) && !State.LOADED.equals(s)) {
            throw unexpectedState(id, new State[] { State.LOADING, State.LOADED }, s);
        }

        final State desiredState = State.DELETING;

        log.debug("Changing state for {} from {} to {}", new Object[] { id, s, desiredState });

        PersistedStores.setState(id, desiredState);

        // Delete the keys
        BatchDeleter bd = null;
        try {
            bd = id.connector().createBatchDeleter(id.dataTable(), id.auths(), id.readThreads(),
                    id.writerConfig());
            bd.setRanges(Collections.singleton(Range.prefix(id.uuid())));

            bd.delete();
        } finally {
            if (null != bd) {
                bd.close();
            }
        }

        log.debug("Removing state for {}", id);

        PersistedStores.remove(id);
    } finally {
        sw.stop();
        id.tracer().addTiming("Cosmos:delete", sw.elapsed(TimeUnit.MILLISECONDS));

        // Be nice and when the client deletes these results, automatically flush the traces for them too
        id.sendTraces();
    }
}

From source file:org.apache.bookkeeper.bookie.Journal.java

/**
 * A thread used for persisting journal entries to journal files.
 *
 * <p>
 * Besides persisting journal entries, it also takes responsibility of
 * rolling journal files when a journal file reaches journal file size
 * limitation.
 * </p>
 * <p>
 * During journal rolling, it first closes the writing journal, generates
 * new journal file using current timestamp, and continue persistence logic.
 * Those journals will be garbage collected in SyncThread.
 * </p>
 * @see org.apache.bookkeeper.bookie.SyncThread
 */
@Override
public void run() {
    LinkedList<QueueEntry> toFlush = new LinkedList<QueueEntry>();
    ByteBuffer lenBuff = ByteBuffer.allocate(4);
    ByteBuffer paddingBuff = ByteBuffer.allocate(2 * conf.getJournalAlignmentSize());
    ZeroBuffer.put(paddingBuff);
    JournalChannel logFile = null;
    forceWriteThread.start();
    Stopwatch journalCreationWatcher = new Stopwatch();
    Stopwatch journalFlushWatcher = new Stopwatch();
    long batchSize = 0;
    try {
        List<Long> journalIds = listJournalIds(journalDirectory, null);
        // Should not use MathUtils.now(), which uses System.nanoTime() and
        // could only be used to measure elapsed time.
        // http://docs.oracle.com/javase/1.5.0/docs/api/java/lang/System.html#nanoTime%28%29
        long logId = journalIds.isEmpty() ? System.currentTimeMillis() : journalIds.get(journalIds.size() - 1);
        BufferedChannel bc = null;
        long lastFlushPosition = 0;
        boolean groupWhenTimeout = false;

        long dequeueStartTime = 0L;

        QueueEntry qe = null;
        while (true) {
            // new journal file to write
            if (null == logFile) {
                logId = logId + 1;

                journalCreationWatcher.reset().start();
                logFile = new JournalChannel(journalDirectory, logId, journalPreAllocSize,
                        journalWriteBufferSize, conf.getJournalAlignmentSize(), removePagesFromCache,
                        conf.getJournalFormatVersionToWrite());
                journalCreationStats.registerSuccessfulEvent(
                        journalCreationWatcher.stop().elapsedTime(TimeUnit.NANOSECONDS), TimeUnit.NANOSECONDS);

                bc = logFile.getBufferedChannel();

                lastFlushPosition = bc.position();
            }

            if (qe == null) {
                if (dequeueStartTime != 0) {
                    journalProcessTimeStats.registerSuccessfulEvent(MathUtils.elapsedNanos(dequeueStartTime),
                            TimeUnit.NANOSECONDS);
                }

                if (toFlush.isEmpty()) {
                    qe = queue.take();
                    dequeueStartTime = MathUtils.nowInNano();
                    journalQueueStats.registerSuccessfulEvent(MathUtils.elapsedNanos(qe.enqueueTime),
                            TimeUnit.NANOSECONDS);
                } else {
                    long pollWaitTimeNanos = maxGroupWaitInNanos
                            - MathUtils.elapsedNanos(toFlush.get(0).enqueueTime);
                    if (flushWhenQueueEmpty || pollWaitTimeNanos < 0) {
                        pollWaitTimeNanos = 0;
                    }
                    qe = queue.poll(pollWaitTimeNanos, TimeUnit.NANOSECONDS);
                    dequeueStartTime = MathUtils.nowInNano();

                    if (qe != null) {
                        journalQueueStats.registerSuccessfulEvent(MathUtils.elapsedNanos(qe.enqueueTime),
                                TimeUnit.NANOSECONDS);
                    }

                    boolean shouldFlush = false;
                    // We should issue a forceWrite if any of the three conditions below holds good
                    // 1. If the oldest pending entry has been pending for longer than the max wait time
                    if (maxGroupWaitInNanos > 0 && !groupWhenTimeout
                            && (MathUtils.elapsedNanos(toFlush.get(0).enqueueTime) > maxGroupWaitInNanos)) {
                        groupWhenTimeout = true;
                    } else if (maxGroupWaitInNanos > 0 && groupWhenTimeout && qe != null
                            && MathUtils.elapsedNanos(qe.enqueueTime) < maxGroupWaitInNanos) {
                        // when the group wait times out, it is better to look forward, as there might be lots of entries that already timed out
                        // due to a previous slow write (writing to a filesystem impacted by force writes).
                        // Group those entries in the queue
                        // a) already timeout
                        // b) limit the number of entries to group
                        groupWhenTimeout = false;
                        shouldFlush = true;
                        flushMaxWaitCounter.inc();
                    } else if (qe != null
                            && ((bufferedEntriesThreshold > 0 && toFlush.size() > bufferedEntriesThreshold)
                                    || (bc.position() > lastFlushPosition + bufferedWritesThreshold))) {
                        // 2. If we have buffered more than the buffWriteThreshold or bufferedEntriesThreshold
                        shouldFlush = true;
                        flushMaxOutstandingBytesCounter.inc();
                    } else if (qe == null) {
                        // We should get here only if flushWhenQueueEmpty is true, else we would wait
                        // for a timeout that would put us past the maxWait threshold
                        // 3. If the queue is empty i.e. no benefit of grouping. This happens when we have one
                        // publish at a time - common case in tests.
                        shouldFlush = true;
                        flushEmptyQueueCounter.inc();
                    }

                    // toFlush is non null and not empty so should be safe to access getFirst
                    if (shouldFlush) {
                        if (conf.getJournalFormatVersionToWrite() >= JournalChannel.V5) {
                            writePaddingBytes(logFile, paddingBuff, conf.getJournalAlignmentSize());
                        }
                        journalFlushWatcher.reset().start();
                        bc.flush(false);
                        lastFlushPosition = bc.position();
                        journalFlushStats.registerSuccessfulEvent(
                                journalFlushWatcher.stop().elapsedTime(TimeUnit.NANOSECONDS),
                                TimeUnit.NANOSECONDS);

                        // Trace the lifetime of entries through persistence
                        if (LOG.isDebugEnabled()) {
                            for (QueueEntry e : toFlush) {
                                LOG.debug("Written and queuing for flush Ledger:" + e.ledgerId + " Entry:"
                                        + e.entryId);
                            }
                        }

                        forceWriteBatchEntriesStats.registerSuccessfulValue(toFlush.size());
                        forceWriteBatchBytesStats.registerSuccessfulValue(batchSize);

                        forceWriteRequests.put(new ForceWriteRequest(logFile, logId, lastFlushPosition, toFlush,
                                (lastFlushPosition > maxJournalSize), false));
                        toFlush = new LinkedList<QueueEntry>();
                        batchSize = 0L;
                        // check whether journal file is over file limit
                        if (bc.position() > maxJournalSize) {
                            logFile = null;
                            continue;
                        }
                    }
                }
            }

            if (!running) {
                LOG.info("Journal Manager is asked to shut down, quit.");
                break;
            }

            if (qe == null) { // no more queue entry
                continue;
            }

            journalWriteBytes.add(qe.entry.remaining());
            journalQueueSize.dec();

            batchSize += (4 + qe.entry.remaining());

            lenBuff.clear();
            lenBuff.putInt(qe.entry.remaining());
            lenBuff.flip();

            // preAlloc based on size
            logFile.preAllocIfNeeded(4 + qe.entry.remaining());

            //
            // we should be doing the following, but then we run out of
            // direct byte buffers
            // logFile.write(new ByteBuffer[] { lenBuff, qe.entry });
            bc.write(lenBuff);
            bc.write(qe.entry);

            toFlush.add(qe);
            qe = null;
        }
        logFile.close();
        logFile = null;
    } catch (IOException ioe) {
        LOG.error("I/O exception in Journal thread!", ioe);
    } catch (InterruptedException ie) {
        LOG.warn("Journal exits when shutting down", ie);
    } finally {
        // There could be packets queued for forceWrite on this logFile
        // That is fine as this exception is going to anyway take down
        // the bookie. If we execute this as a part of graceful shutdown,
        // close will flush the file system cache making any previous
        // cached writes durable so this is fine as well.
        IOUtils.close(LOG, logFile);
    }
    LOG.info("Journal exited loop!");
}
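
The journal thread allocates its two stopwatches once and reuses them for every journal file creation and flush via reset().start(), reading nanoseconds with stop().elapsedTime(TimeUnit.NANOSECONDS) (the older spelling of what current Guava calls elapsed(TimeUnit)). A minimal sketch of that reuse pattern, with the sleep as a hypothetical stand-in for the flush:

import java.util.concurrent.TimeUnit;

import com.google.common.base.Stopwatch;

public class ReusedStopwatchExample {

    public static void main(String[] args) throws InterruptedException {
        // One stopwatch, reused across iterations instead of allocating a new one each time.
        Stopwatch flushWatcher = new Stopwatch();   // Stopwatch.createUnstarted() on newer Guava

        for (int i = 0; i < 3; i++) {
            flushWatcher.reset().start();           // discard the previous measurement and restart
            Thread.sleep(10 + i * 5);               // hypothetical stand-in for bc.flush(false)
            long nanos = flushWatcher.stop().elapsed(TimeUnit.NANOSECONDS);
            // (The BookKeeper code above uses the older elapsedTime(TimeUnit.NANOSECONDS) accessor.)
            System.out.println("flush " + i + " took " + nanos + " ns");
        }
    }
}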

From source file:com.twitter.hraven.rest.RestJSONResource.java

@GET
@Path("summary/apps/{cluster}/")
@Produces(MediaType.APPLICATION_JSON)
public List<AppSummary> getAllApps(@PathParam("cluster") String cluster, @QueryParam("user") String user,
        @QueryParam("startTime") long startTime, @QueryParam("endTime") long endTime,
        @QueryParam("limit") int limit) throws IOException {
    Stopwatch timer = new Stopwatch().start();

    if (limit == 0) {
        limit = Integer.MAX_VALUE;
    }
    if (startTime == 0L) {
        // 24 hours back
        startTime = System.currentTimeMillis() - Constants.MILLIS_ONE_DAY;
        // get top of the hour
        startTime -= (startTime % 3600000);
    }
    if (endTime == 0L) {
        // now
        endTime = System.currentTimeMillis();
        // get top of the hour
        endTime -= (endTime % 3600000);
    }

    LOG.info("Fetching all apps for cluster=" + cluster + " user=" + user + " startTime=" + startTime
            + " endTime=" + endTime);
    AppSummaryService as = getAppSummaryService();
    List<AppSummary> newApps = as.getAllApps(StringUtils.trimToEmpty(cluster), StringUtils.trimToEmpty(user),
            startTime, endTime, limit);
    timer.stop();
    LOG.info("For summary/apps/{cluster}/{user}/{appId}/ with input query " + "summary/apps/" + cluster + SLASH
            + user + "?limit=" + limit + "&startTime=" + startTime + "&endTime=" + endTime + " fetched "
            + newApps.size() + " apps in " + timer);
    serializationContext
            .set(new SerializationContext(SerializationContext.DetailLevel.APP_SUMMARY_STATS_ALL_APPS));
    return newApps;
}

From source file:org.sleuthkit.autopsy.timeline.events.db.EventDB.java

/**
 * count all the events with the given options and return a map organizing
 * the counts in a hierarchy from date > eventType > count
 *
 *
 * @param startTime events before this time will be excluded (seconds from
 *                  unix epoch)
 * @param endTime   events at or after this time will be excluded (seconds
 *                  from unix epoch)
 * @param filter    only events that pass this filter will be counted
 * @param zoomLevel only events of this type or a subtype will be counted
 *                  and the counts will be organized into bins for each of the subtypes of
 *                  the given event type
 *
 * @return a map organizing the counts in a hierarchy from date > eventType >
 *         count
 */
private Map<EventType, Long> countEvents(Long startTime, Long endTime, Filter filter,
        EventTypeZoomLevel zoomLevel) {
    if (Objects.equals(startTime, endTime)) {
        endTime++;
    }

    Map<EventType, Long> typeMap = new HashMap<>();

    // do we want the root or subtype column of the database
    final boolean useSubTypes = (zoomLevel == EventTypeZoomLevel.SUB_TYPE);

    //get some info about the range of dates requested
    final String queryString = "select count(*), " + (useSubTypes ? SUB_TYPE_COLUMN : BASE_TYPE_COLUMN) // NON-NLS
            + " from events where time >= " + startTime + " and time < " + endTime + " and "
            + getSQLWhere(filter) // NON-NLS
            + " GROUP BY " + (useSubTypes ? SUB_TYPE_COLUMN : BASE_TYPE_COLUMN); // NON-NLS

    ResultSet rs = null;
    dbReadLock();
    //System.out.println(queryString);
    try (Statement stmt = con.createStatement();) {
        Stopwatch stopwatch = new Stopwatch();
        stopwatch.start();
        rs = stmt.executeQuery(queryString);
        stopwatch.stop();
        // System.out.println(stopwatch.elapsedMillis() / 1000.0 + " seconds");
        while (rs.next()) {

            EventType type = useSubTypes ? RootEventType.allTypes.get(rs.getInt(SUB_TYPE_COLUMN))
                    : BaseTypes.values()[rs.getInt(BASE_TYPE_COLUMN)];

            typeMap.put(type, rs.getLong("count(*)")); // NON-NLS
        }

    } catch (Exception ex) {
        LOGGER.log(Level.SEVERE, "error getting count of events from db.", ex); // NON-NLS
    } finally {
        try {
            rs.close();
        } catch (SQLException ex) {
            Exceptions.printStackTrace(ex);
        }
        dbReadUnlock();
    }
    return typeMap;
}
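
The commented-out timing line above uses elapsedMillis(), an accessor from older Guava releases that newer versions replace with elapsed(TimeUnit.MILLISECONDS). A small sketch of timing a query and reporting seconds, with executeQuery() simulated so the example stays self-contained:

import java.util.concurrent.TimeUnit;

import com.google.common.base.Stopwatch;

public class QuerySecondsExample {

    // Hypothetical stand-in for stmt.executeQuery(queryString)
    static int executeQuery(String sql) throws InterruptedException {
        Thread.sleep(120);
        return 7;   // pretend row count
    }

    public static void main(String[] args) throws InterruptedException {
        Stopwatch stopwatch = new Stopwatch();   // Stopwatch.createUnstarted() on newer Guava
        stopwatch.start();
        int rows = executeQuery("select count(*) from events");
        stopwatch.stop();

        // elapsedMillis() in old Guava corresponds to elapsed(TimeUnit.MILLISECONDS) in newer versions.
        double seconds = stopwatch.elapsed(TimeUnit.MILLISECONDS) / 1000.0;
        System.out.println(rows + " row(s) counted in " + seconds + " seconds");
    }
}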