Example usage for com.google.common.base Stopwatch Stopwatch

Introduction

This page collects example usages of the no-argument constructor of com.google.common.base.Stopwatch, i.e. Stopwatch(). Note that this constructor was deprecated in Guava 15.0 in favor of the static factories Stopwatch.createStarted() and Stopwatch.createUnstarted(), and was later removed from the public API; the examples below predate that change.

Prototype

Stopwatch() 
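
Before the per-project examples, here is a minimal self-contained sketch of the constructor's lifecycle. It assumes a Guava version (roughly 14.x) in which both the Stopwatch() constructor and elapsed(TimeUnit) exist; on current Guava, use Stopwatch.createUnstarted() or Stopwatch.createStarted() instead.

import java.util.concurrent.TimeUnit;

import com.google.common.base.Stopwatch;

public class StopwatchBasics {
    public static void main(String[] args) throws InterruptedException {
        Stopwatch stopwatch = new Stopwatch(); // created unstarted, at zero
        stopwatch.start();
        Thread.sleep(50); // stand-in for the work being measured
        stopwatch.stop();

        // elapsed(TimeUnit) reads the accumulated duration; toString()
        // renders a human-friendly form such as "51.23 ms"
        System.out.println(stopwatch.elapsed(TimeUnit.MILLISECONDS) + " ms");
        System.out.println(stopwatch);

        stopwatch.reset(); // back to zero and stopped
    }
}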

Usage

From source file: org.n52.lod.csw.CSWLoDEnabler.java

Four stopwatches split the total runtime of a CSW-to-LOD harvesting run into retrieval, mapping, and everything else, with overallTimer spanning the whole method.

public void asyncRunStartingFrom(final int startPos) throws IOException {
    log.info("STARTING CSW to LOD..");

    if (!(addToServer || saveToFile)) {
        log.warn("Neither triple store nor file output are activated.");
        return;
    }

    final Stopwatch overallTimer = new Stopwatch();
    overallTimer.start();

    final Stopwatch retrievingTimer = new Stopwatch();
    final Stopwatch mappingTimer = new Stopwatch();
    final Stopwatch otherTimer = new Stopwatch();

    otherTimer.start();
    XmlToRdfMapper mapper = new GluesMapper(config);

    TripleSink serverSink = null;
    if (addToServer) {
        try {
            serverSink = new VirtuosoServer(config, mapper);
        } catch (RuntimeException e) {
            log.error("Could not connect to graph", e);
        }
    }

    TripleSink fileSink = null;
    if (saveToFile) {
        fileSink = new FileTripleSink(mapper);
    }

    long recordsInTotal;
    try {
        recordsInTotal = csw.getNumberOfRecords();
        log.debug("Retrieved number of records from server: {}", recordsInTotal);
    } catch (IllegalStateException | HttpClientException | XmlException e) {
        log.error("Could not retrieve number of records from catalog {}, falling back to {}", csw,
                FALLBACK_RECORDS_TOTAL, e);
        recordsInTotal = FALLBACK_RECORDS_TOTAL;
    }
    report.startIndex = startPos;
    report.recordNumber = recordsInTotal;
    otherTimer.stop();

    async(startPos, recordsInTotal, overallTimer, retrievingTimer, mappingTimer, serverSink, fileSink);

    otherTimer.start();
    if (fileSink != null)
        try {
            fileSink.close();
        } catch (Exception e) {
            log.error("Could not close file sink {}", fileSink, e);
        }

    if (serverSink != null)
        try {
            serverSink.close();
        } catch (Exception e) {
            log.error("Could not close server sink {}", serverSink, e);
        }

    if (!report.issues.isEmpty())
        log.error(report.extendedToString());

    overallTimer.stop();
    otherTimer.stop();

    log.info("DONE with CSW to LOD.. duration = {} (retrieving: {}, mapping = {}, other = {})", overallTimer,
            retrievingTimer, mappingTimer, otherTimer);
    log.info("Results: {}", report);
    log.info("Sinks: server = {}, file = {}", addToServer, saveToFile);
    log.info("Server: {} | File: {}", serverSink, fileSink);
}
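
One detail worth calling out above: otherTimer is started and stopped twice, once before and once after the async(...) call, and a Guava Stopwatch accumulates elapsed time across start/stop cycles rather than restarting from zero. A minimal sketch of that accumulation under the same pre-15.0 API assumption (doSetup, runMainWork, and doTeardown are hypothetical placeholders):

Stopwatch timer = new Stopwatch();

timer.start();
doSetup(); // hypothetical phase 1
timer.stop();

runMainWork(); // this phase is not measured by timer

timer.start();
doTeardown(); // hypothetical phase 2
timer.stop();

// logs the sum of both measured phases; toString() renders e.g. "1.234 s"
log.info("setup + teardown took {}", timer);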

From source file: graph.features.cpp.OpenCPP.java

A stopwatch is started at the top of each pass over the odd-degree vertices and reset at the bottom; the per-iteration timing output itself is commented out.

private OpenCPPSolution<Box<T>> bestSolution(final UndirectedGraph<Box<T>> boxedGraph,
        final T startingMazeNode) {

    OpenCPPSolution<Box<T>> bestSolution = new OpenCPPSolution<Box<T>>(null, null, null, null,
            2 * this.getLowerBoundCost() * 2);

    final Stopwatch stopwatch = new Stopwatch();

    final DegreeInterface<T> degreeInterface = this.getGraph().fetch(DegreeFeature.class).up();

    //final int i = 0;
    for (final T oddVertice : degreeInterface.getNodesWithOddDegree().keySet()) {
        stopwatch.start();
        final UndirectedGraph<Box<T>> virtualGraph = this.buildVirtualGraph(boxedGraph, startingMazeNode,
                oddVertice);

        //final ClosedCPP<Box<T>> cppSolver = ClosedCPP.from(virtualGraph);
        final ClosedCPPInterface<Box<T>> closedCPPInterface = virtualGraph.fetch(ClosedCPPFeature.class).up();

        final ClosedCPPSolution<Box<T>> cppSolution = closedCPPInterface.solve();
        if (cppSolution.getUpperBoundCost() < bestSolution.getUpperBoundCost()) {
            bestSolution = new OpenCPPSolution<Box<T>>(new Box<T>(oddVertice), virtualGraph,
                    cppSolution.getTraversalByEdge(), cppSolution.getLowerBoundCost(),
                    cppSolution.getUpperBoundCost());
        }
        /*
        System.out.println();
        System.out.println(++i + "/" + this.oddVertices.size() + " : " + stopwatch.elapsedTime(TimeUnit.MILLISECONDS) + " " + TimeUnit.MILLISECONDS);
        System.out.println(oddVertice + " -> " + cppSolver.getUpperBoundCost() + "$");
        System.out.println();
        */
        stopwatch.reset();
    }

    return bestSolution;

}
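
The loop above starts the stopwatch at the top of each pass and resets it at the bottom; the statements that would have printed each pass's time are commented out. A sketch of that per-iteration pattern on its own (Item, items, and process are hypothetical placeholders):

Stopwatch perIteration = new Stopwatch();
for (Item item : items) {
    perIteration.start();
    process(item); // hypothetical per-iteration work
    System.out.println(item + " took " + perIteration.elapsed(TimeUnit.MILLISECONDS) + " ms");
    perIteration.reset(); // reset() stops the watch and zeroes it, so start() is legal next pass
}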

From source file: com.sourcecode.FileInputFormat.java

The stopwatch brackets the input-directory listing, whether single-threaded or delegated to a LocatedFileStatusFetcher, and the duration is reported at debug level via elapsedMillis().

/** List input directories.
 * Subclasses may override to, e.g., select only files matching a regular
 * expression.
 * 
 * @param job the job to list input paths for
 * @return array of FileStatus objects
 * @throws IOException if zero items.
 */
protected List<FileStatus> listStatus(JobContext job) throws IOException {
    Path[] dirs = getInputPaths(job);
    if (dirs.length == 0) {
        throw new IOException("No input paths specified in job");
    }

    // get tokens for all the required FileSystems..
    TokenCache.obtainTokensForNamenodes(job.getCredentials(), dirs, job.getConfiguration());

    // Whether we need to recursively look into the directory structure
    boolean recursive = getInputDirRecursive(job);

    // creates a MultiPathFilter with the hiddenFileFilter and the
    // user provided one (if any).
    List<PathFilter> filters = new ArrayList<PathFilter>();
    filters.add(hiddenFileFilter);
    PathFilter jobFilter = getInputPathFilter(job);
    if (jobFilter != null) {
        filters.add(jobFilter);
    }
    PathFilter inputFilter = new MultiPathFilter(filters);

    List<FileStatus> result = null;

    int numThreads = job.getConfiguration().getInt(LIST_STATUS_NUM_THREADS, DEFAULT_LIST_STATUS_NUM_THREADS);
    Stopwatch sw = new Stopwatch().start();
    if (numThreads == 1) {
        result = singleThreadedListStatus(job, dirs, inputFilter, recursive);
    } else {
        Iterable<FileStatus> locatedFiles = null;
        try {
            LocatedFileStatusFetcher locatedFileStatusFetcher = new LocatedFileStatusFetcher(
                    job.getConfiguration(), dirs, recursive, inputFilter, true);
            locatedFiles = locatedFileStatusFetcher.getFileStatuses();
        } catch (InterruptedException e) {
            throw new IOException("Interrupted while getting file statuses");
        }
        result = Lists.newArrayList(locatedFiles);
    }

    sw.stop();
    if (LOG.isDebugEnabled()) {
        LOG.debug("Time taken to get FileStatuses: " + sw.elapsedMillis());
    }
    LOG.info("Total input paths to process : " + result.size());
    return result;
}
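
sw.elapsedMillis() above is the older accessor; it was deprecated in Guava 14.0 in favor of elapsed(TimeUnit) and removed in later releases. A sketch of the same debug-guarded timing against the current API (listInputs is a hypothetical stand-in for the listing work):

Stopwatch sw = Stopwatch.createStarted(); // current replacement for new Stopwatch().start()
List<FileStatus> result = listInputs(); // hypothetical stand-in for the listing work
sw.stop();
if (LOG.isDebugEnabled()) {
    LOG.debug("Time taken to get FileStatuses: " + sw.elapsed(TimeUnit.MILLISECONDS));
}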

From source file: com.mortardata.pig.storage.DynamoDBStorage.java

Rather than timing a single block, prepareToWrite() creates and starts a stopwatch that is kept in a field for use by the subsequent write path.

/** BACKEND **/

@SuppressWarnings("rawtypes")
@Override
public void prepareToWrite(RecordWriter writer) throws IOException {
    // Get the schema string from the UDFContext object.
    UDFContext udfc = UDFContext.getUDFContext();
    Properties p = udfc.getUDFProperties(this.getClass(), new String[] { this.udfContextSignature });
    String strSchema = p.getProperty(SCHEMA_PROPERTY);
    if (strSchema == null) {
        throw new IOException("Could not find schema in UDF context at property " + SCHEMA_PROPERTY);
    }

    // Parse the schema from the string stored in the properties object.
    this.schema = new ResourceSchema(Utils.getSchemaFromString(strSchema));

    // connect to dynamo
    this.dynamo = loadDynamoDB();

    // fetch capacity we are allowed to use
    this.maxWriteCapacity = getMaxWriteCapacity();
    this.currentWriteCapacity = this.maxWriteCapacity;
    this.queue = new DynamoWriteRequestBlockingQueue();

    // create and start the stopwatch
    this.stopwatch = new Stopwatch().start();

}
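
Since the stopwatch lives in a field, later write calls can read it; one plausible use is metering consumed write capacity per unit of time. The sketch below is purely illustrative of that idea (replenishCapacity and the one-second refill rule are assumptions, not DynamoDBStorage's actual logic):

// refill the per-second write budget once at least a second has elapsed
private void replenishCapacity() {
    if (this.stopwatch.elapsed(TimeUnit.MILLISECONDS) >= 1000) {
        this.currentWriteCapacity = this.maxWriteCapacity;
        this.stopwatch.reset().start(); // zero the watch and keep it running
    }
}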

From source file: pl.llp.aircasting.view.presenter.MeasurementPresenter.java

The stopwatch times the preparation of the timeline view, and the elapsed milliseconds are reported through a graph-performance logger.

protected synchronized void prepareTimelineView() {
    if (!timelineView.isEmpty()) {
        return;
    }

    Stopwatch stopwatch = new Stopwatch().start();

    final List<Measurement> measurements = getFullView();

    if (anchor != 0 && new Date().getTime() - lastScrolled > SCROLL_TIMEOUT) {
        anchor = timeToAnchor(timeAnchor, measurements);
    }

    int position = measurements.size() - 1 - anchor;
    final long lastMeasurementTime = measurements.isEmpty() ? 0
            : measurements.get(position).getTime().getTime();

    timelineView.clear();
    TreeMap<Long, Measurement> measurementsMap = new TreeMap<Long, Measurement>();
    for (Measurement m : measurements) {
        measurementsMap.put(m.getTime().getTime(), m);
    }

    //    +1 because subMap parameters are (inclusive, exclusive)
    timelineView.addAll(measurementsMap
            .subMap(lastMeasurementTime - visibleMilliseconds, lastMeasurementTime + 1).values());
    measurementsSize = measurements.size();

    Logger.logGraphPerformance("prepareTimelineView for [" + timelineView.size() + "] took "
            + stopwatch.elapsed(TimeUnit.MILLISECONDS));
}

From source file: org.geoserver.jdbcconfig.internal.ConfigDatabase.java

The stopwatch times only the ID query itself; at FINE level the record count is logged together with the stopwatch's human-readable toString() rendering.

public <T extends Info> CloseableIterator<T> query(final Class<T> of, final Filter filter,
        @Nullable Integer offset, @Nullable Integer limit, @Nullable SortBy... sortOrder) {

    checkNotNull(of);
    checkNotNull(filter);
    checkArgument(offset == null || offset.intValue() >= 0);
    checkArgument(limit == null || limit.intValue() >= 0);

    QueryBuilder<T> sqlBuilder = QueryBuilder.forIds(of, dbMappings).filter(filter).offset(offset).limit(limit)
            .sortOrder(sortOrder);

    final StringBuilder sql = sqlBuilder.build();
    final Map<String, Object> namedParameters = sqlBuilder.getNamedParameters();
    final Filter unsupportedFilter = sqlBuilder.getUnsupportedFilter();
    final boolean fullySupported = Filter.INCLUDE.equals(unsupportedFilter);

    if (LOGGER.isLoggable(Level.FINER)) {
        LOGGER.finer("Original filter: " + filter);
        LOGGER.finer("Supported filter: " + sqlBuilder.getSupportedFilter());
        LOGGER.finer("Unsupported filter: " + sqlBuilder.getUnsupportedFilter());
    }
    logStatement(sql, namedParameters);

    Stopwatch sw = new Stopwatch().start();
    List<String> ids = template.queryForList(sql.toString(), namedParameters, String.class);
    sw.stop();
    if (LOGGER.isLoggable(Level.FINE)) {
        LOGGER.fine(Joiner.on("").join("query returned ", ids.size(), " records in ", sw.toString()));
    }

    List<T> lazyTransformed = Lists.transform(ids, new Function<String, T>() {
        @Override
        public T apply(String id) {
            return getById(id, of);
        }
    });

    CloseableIterator<T> result;

    if (fullySupported) {
        Iterator<T> iterator = lazyTransformed.iterator();
        result = new CloseableIteratorAdapter<T>(iterator);
    } else {
        Iterator<T> iterator = lazyTransformed.iterator();
        if (offset != null) {
            Iterators.skip(iterator, offset.intValue());
        }
        if (limit != null) {
            iterator = Iterators.limit(iterator, limit.intValue());
        }
        result = CloseableIteratorAdapter.filter(iterator, filter);
    }

    return result;
}

From source file: org.apache.hadoop.hbase.zookeeper.MetaTableLocator.java

The stopwatch tracks how long the caller has been waiting for hbase:meta to be assigned, emitting a warning roughly every ten seconds.

/**
 * Waits indefinitely for availability of <code>hbase:meta</code>.  Used during
 * cluster startup.  Does not verify meta, just that something has been
 * set up in zk.
 * @see #waitMetaRegionLocation(org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher, long)
 * @throws InterruptedException if interrupted while waiting
 */
public void waitMetaRegionLocation(ZooKeeperWatcher zkw) throws InterruptedException {
    Stopwatch stopwatch = new Stopwatch().start();
    while (!stopped) {
        try {
            if (waitMetaRegionLocation(zkw, 100) != null)
                break;
            long sleepTime = stopwatch.elapsedMillis();
            // +1 in case sleepTime=0
            if ((sleepTime + 1) % 10000 == 0) {
                LOG.warn("Have been waiting for meta to be assigned for " + sleepTime + "ms");
            }
        } catch (NotAllMetaRegionsOnlineException e) {
            if (LOG.isTraceEnabled()) {
                LOG.trace("hbase:meta still not available, sleeping and retrying." + " Reason: "
                        + e.getMessage());
            }
        }
    }
}
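
Note that the modulo check above only fires when elapsedMillis() happens to land exactly on a multiple of 10000 minus one, so warnings can be skipped entirely depending on timing. A sketch of a more robust variant that remembers when it last warned (done and tryOnce are hypothetical placeholders):

Stopwatch stopwatch = Stopwatch.createStarted();
long lastWarnMs = 0;
while (!done()) {
    if (tryOnce()) {
        break;
    }
    long waitedMs = stopwatch.elapsed(TimeUnit.MILLISECONDS);
    if (waitedMs - lastWarnMs >= 10000) {
        LOG.warn("Have been waiting for meta to be assigned for " + waitedMs + "ms");
        lastWarnMs = waitedMs;
    }
}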

From source file: org.rhq.server.metrics.MetricsServer.java

The stopwatch is stopped in a finally block, so the duration of the summary-aggregate calculation is logged on every exit path when debug logging is enabled.

public AggregateNumericMetric getSummaryAggregate(List<Integer> scheduleIds, long beginTime, long endTime) {
    Stopwatch stopwatch = new Stopwatch().start();
    try {
        DateTime begin = new DateTime(beginTime);

        if (dateTimeService.isInRawDataRange(new DateTime(beginTime))) {
            Iterable<RawNumericMetric> metrics = dao.findRawMetrics(scheduleIds, beginTime, endTime);
            return calculateAggregatedRaw(metrics, beginTime);
        }
        Bucket bucket = getBucket(begin);
        List<AggregateNumericMetric> metrics = loadMetrics(scheduleIds, beginTime, endTime, bucket);

        return calculateAggregate(metrics, beginTime, bucket);
    } finally {
        stopwatch.stop();
        if (log.isDebugEnabled()) {
            log.debug("Finished calculating group summary aggregate for [scheduleIds: " + scheduleIds
                    + ", beginTime: " + beginTime + ", endTime: " + endTime + "] in "
                    + stopwatch.elapsed(TimeUnit.MILLISECONDS) + " ms");
        }
    }
}

From source file: org.apache.drill.exec.store.schedule.BlockMapBuilder.java

start() and stop() bracket the construction of the hostname-to-endpoint map, and the elapsed milliseconds are logged at debug level.

/**
 * Builds a mapping of Drillbit endpoints to hostnames
 */
private static ImmutableMap<String, DrillbitEndpoint> buildEndpointMap(Collection<DrillbitEndpoint> endpoints) {
    Stopwatch watch = new Stopwatch();
    watch.start();
    HashMap<String, DrillbitEndpoint> endpointMap = Maps.newHashMap();
    for (DrillbitEndpoint d : endpoints) {
        String hostName = d.getAddress();
        endpointMap.put(hostName, d);
    }
    watch.stop();
    logger.debug("Took {} ms to build endpoint map", watch.elapsed(TimeUnit.MILLISECONDS));
    return ImmutableMap.copyOf(endpointMap);
}

From source file: org.apache.drill.exec.store.schedule.AssignmentCreator.java

The stopwatch here is started but never stopped or read; it appears to be leftover instrumentation around the construction of the per-endpoint fragment iterators.

/**
 * Groups minor fragments together by corresponding endpoint, and creates an iterator that can be used to evenly
 * distribute work assigned to a given endpoint to all corresponding minor fragments evenly
 *
 * @return
 */
private Map<DrillbitEndpoint, FragIteratorWrapper> getEndpointIterators() {
    Stopwatch watch = new Stopwatch();
    watch.start();
    Map<DrillbitEndpoint, FragIteratorWrapper> map = Maps.newLinkedHashMap();
    Map<DrillbitEndpoint, List<Integer>> mmap = Maps.newLinkedHashMap();
    for (int i = 0; i < incomingEndpoints.size(); i++) {
        DrillbitEndpoint endpoint = incomingEndpoints.get(i);
        List<Integer> intList = mmap.get(incomingEndpoints.get(i));
        if (intList == null) {
            intList = Lists.newArrayList();
        }
        intList.add(Integer.valueOf(i));
        mmap.put(endpoint, intList);
    }

    for (DrillbitEndpoint endpoint : mmap.keySet()) {
        FragIteratorWrapper wrapper = new FragIteratorWrapper();
        wrapper.iter = Iterators.cycle(mmap.get(endpoint));
        wrapper.maxCount = maxWork * mmap.get(endpoint).size();
        wrapper.minCount = Math.max(maxWork - 1, 1) * mmap.get(endpoint).size();
        map.put(endpoint, wrapper);
    }
    return map;
}