Example usage for com.google.common.base Stopwatch stop

Introduction

On this page you can find example usages of com.google.common.base.Stopwatch.stop(), collected from open-source projects.

Prototype

public Stopwatch stop() 

Document

Stops the stopwatch. Future reads will return the fixed duration that had elapsed up to this point.
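
A minimal sketch of the basic pattern, assuming a recent Guava release where a Stopwatch is obtained through its static factory methods rather than a constructor (StopwatchDemo and the Thread.sleep stand-in are illustrative, not taken from any of the sources below):

import java.util.concurrent.TimeUnit;

import com.google.common.base.Stopwatch;

public class StopwatchDemo {
    public static void main(String[] args) throws InterruptedException {
        Stopwatch stopwatch = Stopwatch.createStarted();
        Thread.sleep(100);                       // stand-in for the work being timed
        stopwatch.stop();                        // freezes the elapsed duration
        long millis = stopwatch.elapsed(TimeUnit.MILLISECONDS);
        System.out.println("took " + stopwatch); // toString() picks a human-readable unit
        stopwatch.reset();                       // back to zero and unstarted, ready for reuse
    }
}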

Usage

From source file:uk.ac.open.kmi.iserve.sal.manager.impl.ConcurrentSparqlGraphStoreManager.java

@Override
public Multimap<URI, URI> listResourcesMapByQuery(String queryStr, String variableNameA, String variableNameB) {
    Multimap<URI, URI> result = HashMultimap.create();
    // If the SPARQL endpoint does not exist return immediately.
    if (this.getSparqlQueryEndpoint() == null || queryStr == null || queryStr.isEmpty()) {
        return result;
    }

    // Query the engine
    log.debug("Evaluating SPARQL query in Knowledge Base: \n {}", queryStr);
    Query query = QueryFactory.create(queryStr);
    QueryExecution qe = QueryExecutionFactory.sparqlService(this.getSparqlQueryEndpoint().toASCIIString(),
            query);
    MonitoredQueryExecution qexec = new MonitoredQueryExecution(qe);

    try {
        Stopwatch stopwatch = new Stopwatch();
        stopwatch.start();

        ResultSet qResults = qexec.execSelect();

        stopwatch.stop();
        log.debug("Time taken for querying the registry: {}", stopwatch);

        Resource resourceA;
        Resource resourceB;
        // Iterate over the results obtained
        while (qResults.hasNext()) {
            QuerySolution soln = qResults.nextSolution();

            // Get the match URL
            resourceA = soln.getResource(variableNameA);
            resourceB = soln.getResource(variableNameB);

            if (resourceA != null && resourceA.isURIResource() && resourceB != null
                    && resourceB.isURIResource()) {
                result.put(new URI(resourceA.getURI()), new URI(resourceB.getURI()));
            } else {
                log.warn("Skipping result as the URL is null");
                break;
            }
        }
    } catch (URISyntaxException e) {
        log.error("Error obtaining match result. Expected a correct URI", e);
    } finally {
        qexec.close();
    }
    return result;
}
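
This example (and several others below) creates the stopwatch with new Stopwatch() and a separate start() call, a constructor that Guava deprecated in release 15 and later removed. On a current Guava, the same timing pattern would be written with a factory method, roughly:

// Equivalent on current Guava versions, where the public constructor is gone:
Stopwatch stopwatch = Stopwatch.createStarted(); // replaces: new Stopwatch(); stopwatch.start();

ResultSet qResults = qexec.execSelect();

stopwatch.stop();
log.debug("Time taken for querying the registry: {}", stopwatch);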

From source file:org.terasology.cities.debug.SwingRasterizer.java

public void rasterizeChunk(Graphics2D g, Vector2i coord) {

    int chunkSizeX = ChunkConstants.SIZE_X;
    int chunkSizeZ = ChunkConstants.SIZE_Z;

    int wx = coord.getX() * chunkSizeX;
    int wz = coord.getY() * chunkSizeZ;

    Sector sector = Sectors.getSectorForBlock(wx, wz);

    if (g.hitClip(wx, wz, chunkSizeX, chunkSizeZ)) {

        Stopwatch swBK = debugMap.getUnchecked("RASTER Background");
        Stopwatch swCt = debugMap.getUnchecked("RASTER Cities");
        Stopwatch swRd = debugMap.getUnchecked("RASTER Roads");

        BufferedImage image = new BufferedImage(chunkSizeX, chunkSizeZ, BufferedImage.TYPE_INT_RGB);
        Brush brush = new SwingBrush(wx, wz, image, colorFunc);

        HeightMap cachedHm = HeightMaps.caching(heightMap, brush.getAffectedArea(), 8);
        TerrainInfo ti = new TerrainInfo(cachedHm);

        swBK.start();
        drawBackground(image, wx, wz, ti);
        swBK.stop();

        swCt.start();
        drawCities(sector, ti, brush);
        swCt.stop();

        swRd.start();
        drawRoads(sector, ti, brush);
        swRd.stop();

        int ix = wx;
        int iy = wz;
        g.drawImage(image, ix, iy, null);

    }

}

From source file:com.vmware.photon.controller.apife.backends.VmSqlBackend.java

/**
 * Create VM entity in the database.
 */
@VisibleForTesting
@Transactional
protected VmEntity create(String projectId, VmCreateSpec spec) throws ExternalException {
    Stopwatch createWatch = Stopwatch.createStarted();
    ProjectEntity project = projectBackend.findById(projectId);
    FlavorEntity flavorEntity = flavorBackend.getEntityByNameAndKind(spec.getFlavor(), Vm.KIND);
    if (!FlavorState.READY.equals(flavorEntity.getState())) {
        throw new InvalidFlavorStateException(
                String.format("Cannot create vm using flavor %s: it is in invalid state %s.",
                        flavorEntity.getName(), flavorEntity.getState()));
    }

    VmEntity vm = new VmEntity();
    vm.setName(spec.getName());
    vm.setFlavorId(flavorEntity.getId());
    // Hibernate requires this to be a separate list, because one list cannot be shared by two entities.
    vm.setCost(new ArrayList<>(flavorEntity.getCost()));
    vm.setEnvironment(spec.getEnvironment());

    Set<TagEntity> tags = new HashSet<>();
    for (String tag : spec.getTags()) {
        tags.add(tagDao.findOrCreate(tag));
    }
    vm.setTags(tags);

    vm.setNetworks(spec.getNetworks());

    ImageEntity image = imageBackend.findById(spec.getSourceImageId());
    logger.debug("Image {} found for image name {}", image.getId(), image.getName());

    if (!ImageState.READY.equals(image.getState())) {
        throw new InvalidImageStateException(
                String.format("Image %s is in %s state", image.getId(), image.getState()));
    }
    vm.setImageId(image.getId());
    updateBootDiskCapacity(spec.getAttachedDisks(), image, vm);

    vm.setProjectId(project.getId());
    vm.setState(VmState.CREATING);

    String resourceTicketId = project.getResourceTicketId();

    Stopwatch resourceTicketWatch = Stopwatch.createStarted();
    resourceTicketBackend.consumeQuota(resourceTicketId, new QuotaCost(vm.getCost()));
    resourceTicketWatch.stop();
    logger.info("VmSqlBackend.create for Vm Name: {}, resourceTicket {}, consumeQuota in {} milliseconds",
            vm.getName(), resourceTicketId, resourceTicketWatch.elapsed(TimeUnit.MILLISECONDS));

    vmDao.create(vm);

    vm.setAttachedDisks(attachedDiskBackend.createAttachedDisks(vm, spec.getAttachedDisks()));
    vm.setAffinities(localityBackend.create(vm, spec.getAffinities()));

    createWatch.stop();
    logger.info("VmSqlBackend.create for Vm Id: {} and Name: {} took {} milliseconds", vm.getId(), vm.getName(),
            createWatch.elapsed(TimeUnit.MILLISECONDS));

    return vm;
}

From source file:qa.qcri.nadeef.core.utils.sql.PostgresSQLDialect.java

/**
 * {@inheritDoc}
 */
@Override
public int bulkLoad(DBConfig dbConfig, String tableName, Path file, boolean skipHeader) {
    Logger tracer = Logger.getLogger(PostgresSQLDialect.class);
    tracer.info("Bulk load CSV file " + file.toString());
    try (Connection conn = DBConnectionPool.createConnection(dbConfig, true);
            FileReader reader = new FileReader(file.toFile())) {
        Stopwatch watch = Stopwatch.createStarted();
        Schema schema = DBMetaDataTool.getSchema(dbConfig, tableName);
        StringBuilder builder = new StringBuilder();
        for (Column column : schema.getColumns()) {
            if (column.getColumnName().equalsIgnoreCase("TID"))
                continue;
            builder.append(column.getColumnName()).append(",");
        }
        builder.deleteCharAt(builder.length() - 1);

        CopyManager copyManager = new CopyManager((BaseConnection) conn);
        String sql = String.format("COPY %s (%s) FROM STDIN WITH (FORMAT 'csv', DELIMITER ',', HEADER %s)",
                tableName, builder.toString(), skipHeader ? "true" : "false");
        tracer.info(sql);
        copyManager.copyIn(sql, reader);
        watch.stop();
        tracer.info("Bulk load finished in " + watch.elapsed(TimeUnit.MILLISECONDS) + " ms");
    } catch (Exception ex) {
        tracer.error("Loading csv file " + file.getFileName() + " failed.", ex);
        return 1;
    }
    return 0;
}

From source file:org.apache.accumulo.gc.replication.CloseWriteAheadLogReferences.java

@Override
public void run() {
    // As long as we depend on a newer Guava than Hadoop uses, we have to stay
    // compatible with the older version that Hadoop bundles, hence the deprecated constructor below.
    Stopwatch sw = new Stopwatch();

    Connector conn;
    try {
        conn = context.getConnector();
    } catch (Exception e) {
        log.error("Could not create connector", e);
        throw new RuntimeException(e);
    }

    if (!ReplicationTable.isOnline(conn)) {
        log.debug("Replication table isn't online, not attempting to clean up wals");
        return;
    }

    Span findWalsSpan = Trace.start("findReferencedWals");
    HashSet<String> closed = null;
    try {
        sw.start();
        closed = getClosedLogs(conn);
    } finally {
        sw.stop();
        findWalsSpan.stop();
    }

    log.info("Found " + closed.size() + " WALs referenced in metadata in " + sw.toString());
    sw.reset();

    Span updateReplicationSpan = Trace.start("updateReplicationTable");
    long recordsClosed = 0;
    try {
        sw.start();
        recordsClosed = updateReplicationEntries(conn, closed);
    } finally {
        sw.stop();
        updateReplicationSpan.stop();
    }

    log.info(
            "Closed " + recordsClosed + " WAL replication references in replication table in " + sw.toString());
}

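Note how this example reuses a single stopwatch for two timed spans by calling reset() between them. The reset matters: a Guava Stopwatch accumulates elapsed time across start/stop cycles, so without it the second log line would report the combined duration of both phases. A minimal sketch of that behavior (stepOne and stepTwo are placeholders):

Stopwatch sw = Stopwatch.createUnstarted();

sw.start();
stepOne();   // placeholder
sw.stop();   // sw now reads stepOne's duration
sw.reset();  // without this, the next start() would keep accumulating

sw.start();
stepTwo();   // placeholder
sw.stop();   // reads only stepTwo's duration, thanks to the reset()
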
From source file:com.thinkbiganalytics.feedmgr.nifi.CreateFeedBuilder.java

private long eventTime(Stopwatch eventTime) {
    eventTime.stop();
    long elapsedTime = eventTime.elapsed(TimeUnit.MILLISECONDS);
    eventTime.reset();
    return elapsedTime;
}
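
A hypothetical call site for a helper like this (createCategory and createProcessGroup are placeholder steps, not from the original source): one stopwatch is handed to eventTime() after each step, which reports the elapsed time and resets the watch for the next measurement.

Stopwatch stepTimer = Stopwatch.createUnstarted();

stepTimer.start();
createCategory();                           // placeholder step
long categoryMillis = eventTime(stepTimer);

stepTimer.start();
createProcessGroup();                       // placeholder step
long processGroupMillis = eventTime(stepTimer);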

From source file:com.b2international.snowowl.snomed.reasoner.server.classification.ReasonerTaxonomyWalker.java

public void walk() {
    LOGGER.info(">>> Taxonomy extraction");

    final Stopwatch stopwatch = Stopwatch.createStarted();
    final Deque<Node<OWLClass>> nodesToProcess = new LinkedList<Node<OWLClass>>();
    nodesToProcess.add(reasoner.getTopClassNode());

    // Breadth-first walk through the class hierarchy
    while (!nodesToProcess.isEmpty()) {

        final Node<OWLClass> currentNode = nodesToProcess.removeFirst();
        final NodeSet<OWLClass> nextNodeSet = walk(currentNode);

        if (!EMPTY_NODE_SET.equals(nextNodeSet)) {

            nodesToProcess.addAll(nextNodeSet.getNodes());

        }

    }

    processedConceptIds.clear();
    processedConceptIds = null;

    LOGGER.info(MessageFormat.format("<<< Taxonomy extraction [{0}]", stopwatch.stop().toString()));
}
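
The log statement above works because stop() returns the Stopwatch itself, so stopping and formatting collapse into a single expression (stopwatch.stop().toString()); the last example on this page chains elapsed() the same way. A minimal sketch (doWork is a placeholder):

Stopwatch stopwatch = Stopwatch.createStarted();
doWork();                                        // placeholder for the work being timed
LOGGER.info("finished in {}", stopwatch.stop()); // stop() returns this; toString() formats the duration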

From source file:uk.ac.open.kmi.iserve.sal.manager.impl.ServiceManagerIndexRdf.java

private void indexService(Service service) {
    Stopwatch stopwatch = new Stopwatch();
    stopwatch.start();
    List<Operation> operations = service.getOperations();
    Set<URI> svcOps = new HashSet<URI>();
    for (Operation operation : operations) {
        svcOps.add(operation.getUri());
        indexOperation(operation);
    }
    // Set the svcOp map
    this.svcOpMap.put(service.getUri(), svcOps);
    // Index the modelReferences
    indexModelReferences(service);
    stopwatch.stop();
    log.info("Service - {} - indexed. Time taken {}", service.getUri(), stopwatch);
}

From source file:de.nx42.maps4cim.map.texture.osm.OverpassBridge.java

/**
 * Downloads the requested data from the Overpass servers and stores
 * the osm xml file in the disk cache, using the specified hash String
 * for later retrieval
 * @param hash the hash under which the file can be retrieved later
 * @return the resulting osm xml file
 * @throws TextureProcessingException if anything goes wrong while
 *         downloading data from the Overpass servers
 */
protected File downloadAndCache(OsmHash hash) throws TextureProcessingException {
    Exception inner = null;
    for (String server : servers) {
        try {
            final Stopwatch stopwatch = Stopwatch.createStarted();

            // generate Query and store result in temp
            URL query = buildQueryURL(server);
            File dest = Cache.temporaray(hash.getXmlFileName());

            // 5 seconds connection timeout, 90 seconds for the server to execute the query
            // (so after this time, the download must start, or a timeout occurs)
            Network.downloadToFile(query, dest, 5, 90);

            // zip result and store in cache
            if (caching) {
                hash.storeInCache(dest);
            }

            stopwatch.stop();
            log.debug("Download from server {} finished in {}", query.getHost(), stopwatch.toString());
            // return plain text xml from temporary directory
            return dest;
        } catch (UnknownHostException e) {
            inner = e;
            log.error("The URL of Overpass-Server {} could not be resolved. Are you connected to the internet?",
                    e.getMessage());
        } catch (SocketTimeoutException e) {
            inner = e;
            log.error("Error getting data from Overpass Server " + server + "\nTrying next ...", e);
        } catch (IOException e) {
            inner = e;
            log.error("I/O Exception while processing OpenStreetMap source data.", e);
        }
    }
    throw new TextureProcessingException(
            "OpenStreetMap source data could not be retrieved via Overpass API.", inner);
}

From source file:org.hyperledger.core.bitcoin.BitcoinPersistentBlocks.java

@Override
public StoredTransaction readTransaction(TID hash) throws HyperLedgerException {
    nRead++;
    Stopwatch watch = Stopwatch.createStarted();
    try {
        byte[] data = store
                .get(OrderedMapStoreKey.createKey(OrderedMapStoreKey.KeyType.TX, hash.unsafeGetArray()));
        if (data != null) {
            StoredTransaction t = StoredTransaction.fromLevelDB(data);
            if (!t.getID().equals(hash)) {
                throw new HyperLedgerException("Database inconsistency in TX " + hash);
            }
            return t;
        }
        return null;
    } finally {
        readTime += watch.stop().elapsed(TimeUnit.MILLISECONDS);
    }
}