List of usage examples for com.google.common.base.Stopwatch.stop()
public Stopwatch stop()
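stop() stops the stopwatch; subsequent reads return the elapsed time fixed at the moment it was stopped. It returns the same Stopwatch instance, which is why several of the examples below chain it (timer.stop().elapsed(...)) or log the stopped watch directly (Stopwatch.toString() prints a human-readable duration such as "38.1 ms"). Calling stop() on a stopwatch that is not running throws IllegalStateException.

A minimal, self-contained sketch of the typical pattern; the Thread.sleep call is just a placeholder for the work being timed:

import com.google.common.base.Stopwatch;
import java.util.concurrent.TimeUnit;

public class StopwatchStopExample {
    public static void main(String[] args) throws InterruptedException {
        Stopwatch sw = Stopwatch.createStarted();
        Thread.sleep(50); // placeholder for the work being timed
        sw.stop(); // freezes the elapsed time; returns this, so calls can be chained
        System.out.println("took " + sw.elapsed(TimeUnit.MILLISECONDS) + " ms (" + sw + ")");
    }
}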
From source file:org.geogig.geoserver.gwc.TruncateTilesOnUpdateRefHook.java
@SuppressWarnings("unchecked") @Override//w ww. j a v a2s. co m public <T> T post(AbstractGeoGigOp<T> command, @Nullable Object retVal, @Nullable RuntimeException exception) throws Exception { checkArgument(command instanceof UpdateRef); final UpdateRef cmd = (UpdateRef) command; final String refName = (String) cmd.getClientData().get("name"); checkState(refName != null, "refName not captured in pre-hook"); if (Boolean.TRUE.equals(cmd.getClientData().get("ignore"))) { LOGGER.debug("GWC geogig truncate post-hook returning, ref '{}' is ignored.", refName); return (T) retVal; } boolean success = exception == null; if (!success) { LOGGER.info("GWC geogig truncate post-hook returning, UpdateRef operation failed on ref '{}'.", refName); return (T) retVal; } final GWC mediator = GWC.get(); if (mediator == null) { LOGGER.debug("GWC geogig truncate post-hook returning, GWC mediator not installed?."); return (T) retVal; } final Optional<Ref> oldValue = (Optional<Ref>) cmd.getClientData().get("oldValue"); final Optional<Ref> newValue = (Optional<Ref>) retVal;// == oldValue if the ref was deleted checkState(oldValue != null, "oldValue not captured in pre-hook"); if (oldValue.equals(newValue)) { LOGGER.debug("GWC geogig truncate post-hook returning, ref '{}' didn't change ({}).", refName, oldValue); return (T) retVal; } List<LayerInfo> affectedLayers; final String newRefName = newValue.get().getName(); Stopwatch sw = Stopwatch.createStarted(); affectedLayers = findAffectedLayers(mediator, command.context(), newRefName); LOGGER.debug(String.format("GWC geogig truncate post-hook found %s affected layers on branch %s in %s.", affectedLayers.size(), refName, sw.stop())); for (LayerInfo layer : affectedLayers) { truncate(mediator, command.context(), layer, oldValue, newValue); } return (T) retVal; }
From source file:org.balloon_project.overflight.task.endpointSource.CKANLoader.java
/**
 * Loads all Linked Open Data endpoints from Datahub.io which offer a SPARQL endpoint.
 *
 * @return list of Linked Open Data SPARQL endpoints
 */
public List<Endpoint> loadEndpoints() throws IOException {
    logger.info("CKAN endpoint loading started");
    Stopwatch timer = Stopwatch.createStarted();
    List<Endpoint> result = new LinkedList<>();
    result.addAll(queryCKAN(DATAHUB_QUERY_LODCLOUD));
    result.addAll(queryCKAN(DATAHUB_QUERY_LOD));
    // stop() returns this, so elapsed() can be chained directly on the stopped watch
    logger.info(result.size() + " CKAN endpoints loaded. (Duration: "
            + timer.stop().elapsed(TimeUnit.SECONDS) + "s)");
    return result;
}
From source file:com.vmware.photon.controller.apife.backends.ResourceTicketSqlBackend.java
/**
 * This method consumes quota associated with the specified cost
 * recorded in the usageMap. If the cost pushes usage over the limit,
 * then this function has no side effect and a QuotaException is thrown.
 * <p/>
 * Quota limits and Cost metrics are loosely coupled in that a Quota limit
 * can be set for a narrow set of metrics. Only these metrics are used
 * for limit enforcement. All metrics are tracked in usage.
 * <p>
 * Note: it is assumed that locks preventing concurrency on this structure
 * are held externally, or are managed through optimistic concurrency/retry
 * on the container that owns the ResourceTicket object (normally the project).
 * </p>
 *
 * @param resourceTicketId - id of the resource ticket
 * @param cost             - the cost object representing how much will be consumed
 * @throws QuotaException when quota allocation fails
 */
@Override
@Transactional
public void consumeQuota(String resourceTicketId, QuotaCost cost) throws QuotaException {
    Stopwatch resourceTicketWatch = Stopwatch.createStarted();
    ResourceTicketEntity resourceTicket = resourceTicketDao.loadWithUpgradeLock(resourceTicketId);
    // stop the watch before reading the elapsed lock-acquisition time
    resourceTicketWatch.stop();
    logger.info("consumeQuota for resourceTicket {}, lock obtained in {} milliseconds",
            resourceTicket.getId(), resourceTicketWatch.elapsed(TimeUnit.MILLISECONDS));
    // first, whip through the cost's actualCostKeys and
    // compute the new usage. then, if usage is ok, commit
    // the new usage values and then update rawUsage
    List<QuotaLineItemEntity> newUsage = new ArrayList<>();
    for (String key : cost.getCostKeys()) {
        if (!resourceTicket.getUsageMap().containsKey(key)) {
            // make sure usage map has appropriate entries, it's only initialized
            // with keys from the limit set
            resourceTicket.getUsageMap().put(key,
                    new QuotaLineItemEntity(key, 0.0, cost.getCost(key).getUnit()));
        }
        // capture current usage into a new object
        QuotaLineItemEntity qli = new QuotaLineItemEntity(key,
                resourceTicket.getUsageMap().get(key).getValue(),
                resourceTicket.getUsageMap().get(key).getUnit());
        QuotaLineItemEntity computedUsage = qli.add(cost.getCost(key));
        newUsage.add(computedUsage);
    }
    // now compare newUsage against limits. if usage > limit, then abort with a
    // QuotaException and no side effects. otherwise, apply the new usage values,
    // then blindly update rawUsage
    for (QuotaLineItemEntity qli : newUsage) {
        // only enforce limits if the usage entry is covered by limits
        if (resourceTicket.getLimitMap().containsKey(qli.getKey())) {
            // test to see if the limit is less than the computed
            // new usage. if it is, then abort
            if (resourceTicket.getLimitMap().get(qli.getKey()).compareTo(qli) < 0) {
                throw new QuotaException(resourceTicket.getLimitMap().get(qli.getKey()),
                        resourceTicket.getLimitMap().get(qli.getKey()), qli);
            }
        }
    }
    // if we made it this far, commit the new usage
    for (QuotaLineItemEntity qli : newUsage) {
        resourceTicket.getUsageMap().put(qli.getKey(), qli);
    }
}
From source file:nl.knaw.huygens.timbuctoo.storage.graph.tinkerpop.TinkerPopLowLevelAPI.java
public Iterator<Vertex> getLatestVerticesOf(Class<? extends Entity> type) {
    Stopwatch retrieveStopwatch = Stopwatch.createStarted();
    LOG.debug("Retrieve vertices of type [{}]", type);
    Stopwatch queryStopwatch = Stopwatch.createStarted();
    LOG.debug("Query vertices of type [{}]", type);
    Iterable<Vertex> vertices = queryByType(type).has(IS_LATEST, true).vertices();
    LOG.debug("Query vertices of type [{}] ended in [{}]", type, queryStopwatch.stop());
    LOG.debug("Retrieve vertices of type [{}] ended in [{}]", type, retrieveStopwatch.stop());
    return vertices.iterator();
}
From source file:org.apache.brooklyn.launcher.osgi.OsgiLauncherImpl.java
@Override
public void initOsgi() {
    synchronized (reloadLock) {
        final Stopwatch startupTimer = Stopwatch.createStarted();
        BrooklynShutdownHooks.resetShutdownFlag();
        LOG.debug("OsgiLauncher init, catalog " + defaultCatalogLocation);
        catalogInitialization(new CatalogInitialization(String.format("file:%s", defaultCatalogLocation)));
        startPartOne();
        startupTimer.stop();
        LOG.info("Brooklyn initialisation (part one) complete after {}", startupTimer.toString());
    }
}
From source file:org.codice.ddf.configuration.migration.ExportMigrationContextImpl.java
/**
 * Performs an export using the context's migratable.
 *
 * @return metadata to export for the corresponding migratable keyed by the migratable's id
 * @throws org.codice.ddf.migration.MigrationException to stop the export operation
 */
@SuppressWarnings("PMD.DefaultPackage" /* designed to be called from ExportMigrationManagerImpl within this package */)
Map<String, Map<String, Object>> doExport() {
    LOGGER.debug("Exporting [{}] with version [{}]...", id, getMigratableVersion()); // version will never be empty
    // the stopwatch is only created (and later stopped) when debug logging is enabled
    Stopwatch stopwatch = null;
    if (LOGGER.isDebugEnabled()) {
        stopwatch = Stopwatch.createStarted();
    }
    migratable.doExport(this);
    if (LOGGER.isDebugEnabled() && (stopwatch != null)) {
        LOGGER.debug("Exported time for {}: {}", id, stopwatch.stop());
    }
    final Map<String, Map<String, Object>> metadata = ImmutableMap.of(id, report.getMetadata());
    LOGGER.debug("Exported metadata for {}: {}", id, metadata);
    return metadata;
}
From source file:qa.qcri.nadeef.core.pipeline.CleanExecutor.java
/**
 * Runs the violation repair.
 */
public CleanExecutor repair() {
    Stopwatch sw = Stopwatch.createStarted();
    repairFlow.reset();
    repairFlow.start();
    repairFlow.waitUntilFinish();
    context.clearNewTuples();
    // elapsed() is read while the watch is still running; stop() then ends the measurement
    PerfReport.appendMetric(PerfReport.Metric.RepairTime, sw.elapsed(TimeUnit.MILLISECONDS));
    sw.stop();
    // TODO: remove it.
    System.gc();
    return this;
}
From source file:jobs.ComputeStratifiedFrequencies2.java
@Override
public void doJob() throws Exception {
    Logger.info("trends computation started...");
    Stopwatch stopwatch = Stopwatch.createUnstarted();
    stopwatch.start();
    int now = Integer.parseInt((String) play.Play.configuration.get("analysis.year"));
    int y5 = now - 5;
    // iterate over all the years and save the values
    Logger.info("Reading index...");
    Directory directory = FSDirectory
            .open(VirtualFile.fromRelativePath("/indexes/index-" + y5).getRealFile());
    DirectoryReader ireader = DirectoryReader.open(directory);
    Terms terms = SlowCompositeReaderWrapper.wrap(ireader).terms("contents");
    TermsEnum iterator = terms.iterator(null);
    BytesRef byteRef;
    Map<Long, Double> frequencies = new HashMap<Long, Double>();
    while ((byteRef = iterator.next()) != null) {
        String term = new String(byteRef.bytes, byteRef.offset, byteRef.length);
        if (!term.contains("_")) {
            Logger.info("Term: " + term);
            Stopwatch time = Stopwatch.createUnstarted();
            time.start();
            Phrase phrase = Phrase.find("byValue", term).first();
            if (phrase != null) {
                Logger.info("Term: " + phrase.value + " (" + term + ")");
                int frequency = iterator.docFreq();
                frequencies.put(phrase.id, (double) frequency);
            }
            time.stop();
            Logger.info("- Query time: " + time.elapsed(TimeUnit.MILLISECONDS));
        }
    }
    ireader.close();
    directory.close();
    Phrase.em().flush();
    Phrase.em().clear();
    int counter = 0;
    for (Long id : frequencies.keySet()) {
        Phrase phrase = Phrase.findById(id);
        phrase.frequency5y = frequencies.get(id);
        phrase.save();
        counter++;
        Logger.info("Counter: " + counter);
        if (counter % 1000 == 0) {
            Phrase.em().flush();
            Phrase.em().clear();
        }
    }
    stopwatch.stop();
    Utils.emailAdmin("Yearly frequency calculated. ",
            "Job finished in " + stopwatch.elapsed(TimeUnit.MINUTES) + " minutes.");
    Logger.info("Job done.");
}
From source file:org.bitcoinj_extra.wallet.WalletFiles.java
private void saveNowInternal() throws IOException {
    final Stopwatch watch = Stopwatch.createStarted();
    File directory = file.getAbsoluteFile().getParentFile();
    File temp = File.createTempFile("wallet", null, directory);
    final Listener listener = vListener;
    if (listener != null)
        listener.onBeforeAutoSave(temp);
    wallet.saveToFile(temp, file);
    if (listener != null)
        listener.onAfterAutoSave(file);
    watch.stop();
    log.info("Save completed in {}", watch);
}
From source file:org.activityinfo.server.endpoint.gwtrpc.AdvisoryLock.java
@Override
public void close() throws Exception {
    Stopwatch stopwatch = Stopwatch.createStarted();
    String sql = String.format("SELECT RELEASE_LOCK('%s')", ADVISORY_LOCK_NAME);
    Query query = entityManager.getSession().createSQLQuery(sql);
    Object result = query.uniqueResult();
    int resultCode = ((Number) result).intValue();
    if (resultCode != SUCCESS_CODE) {
        throw new RuntimeException("Failed to release lock, result code: " + resultCode);
    }
    stopwatch.stop();
    LOGGER.finest("Release lock takes: " + stopwatch.elapsed(TimeUnit.MILLISECONDS) + "ms");
}