List of usage examples for com.google.common.base.Stopwatch.createUnstarted()
@CheckReturnValue public static Stopwatch createUnstarted()
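Creates (but does not start) a new stopwatch using System.nanoTime() as its time source. A minimal sketch of the basic lifecycle, where doWork() is a hypothetical stand-in for the code being measured:

import com.google.common.base.Stopwatch;
import java.util.concurrent.TimeUnit;

Stopwatch stopwatch = Stopwatch.createUnstarted();
stopwatch.start();                  // begin timing
doWork();                           // hypothetical code under measurement
stopwatch.stop();                   // stop timing; the elapsed value is retained
long millis = stopwatch.elapsed(TimeUnit.MILLISECONDS);
System.out.println(stopwatch);      // human-readable toString(), e.g. "12.34 ms"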
From source file:com.google.errorprone.apply.DiffApplier.java
public DiffApplier(int diffParallelism, FileSource source, FileDestination destination) {
    Preconditions.checkNotNull(source);
    Preconditions.checkNotNull(destination);
    this.diffsFailedPaths = new ConcurrentSkipListSet<>();
    this.refactoredPaths = Sets.newConcurrentHashSet();
    this.source = source;
    this.destination = destination;
    this.completedFiles = new AtomicInteger(0);
    this.stopwatch = Stopwatch.createUnstarted();
    // Configure a bounded queue and a rejected-execution policy.
    // In this case CallerRuns may be appropriate.
    this.workerService = new ThreadPoolExecutor(0, diffParallelism, 5, TimeUnit.SECONDS,
            new ArrayBlockingQueue<Runnable>(50), new ThreadPoolExecutor.CallerRunsPolicy());
}
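The pattern here is common: the stopwatch is created unstarted as a field during construction and started later, when the work actually begins, so construction time is never counted. A minimal sketch of that lifecycle (class and method names are illustrative, not from DiffApplier):

import com.google.common.base.Stopwatch;

class Worker {
    private final Stopwatch stopwatch = Stopwatch.createUnstarted(); // construction does not start timing

    void runAll() {
        stopwatch.start(); // begin timing when the work starts, not when the object is built
        // ... process work items ...
        stopwatch.stop();
        System.out.println("finished in " + stopwatch);
    }
}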
From source file:es.usc.citius.composit.core.composition.search.NaiveForwardServiceDiscoverer.java
public ServiceMatchNetwork<E, T> search(Signature<E> signature) {
    Set<E> availableInputs = new HashSet<E>(signature.getInputs());
    Set<E> newOutputs = new HashSet<E>(signature.getInputs());
    Set<Operation<E>> usedServices = new HashSet<Operation<E>>();
    List<Set<Operation<E>>> leveledOps = new LinkedList<Set<Operation<E>>>();
    boolean checkExpectedOutputs = !signature.getOutputs().isEmpty();
    boolean stop;
    Stopwatch timer = Stopwatch.createStarted();
    Stopwatch levelTimer = Stopwatch.createUnstarted();
    int level = 0;
    do {
        HashSet<Operation<E>> candidates = new HashSet<Operation<E>>();
        levelTimer.start();
        candidates.addAll(discovery.findOperationsConsumingSome(newOutputs));
        log.info("(Level {}) {} potential candidates selected in {}", level++, candidates.size(),
                levelTimer.toString());
        // Remove services that cannot be invoked with the available inputs
        for (Iterator<Operation<E>> it = candidates.iterator(); it.hasNext();) {
            Operation<E> candidate = it.next();
            Set<E> matched = matcher.partialMatch(availableInputs, candidate.getSignature().getInputs())
                    .getTargetElements();
            // Invokable?
            if (matched.equals(candidate.getSignature().getInputs())) {
                // Invokable operation, check if it was used previously
                boolean isNew = usedServices.add(candidate);
                if (!isNew) it.remove();
            } else {
                it.remove();
            }
        }
        log.info("\t + [{}] operations selected for this level in {}: {}", candidates.size(),
                levelTimer.toString(), candidates);
        // Collect the new outputs of the new candidates
        newOutputs = Operations.outputs(candidates);
        availableInputs.addAll(newOutputs);
        Set<E> matchedOutputs = matcher.partialMatch(availableInputs, signature.getOutputs())
                .getTargetElements();
        // Add the discovered ops
        if (!candidates.isEmpty()) leveledOps.add(candidates);
        log.debug("\t + Available inputs: {}, new outputs: {}", availableInputs.size(), newOutputs.size());
        // Stop condition. Stop if there are no more candidates and/or expected outputs are satisfied.
        stop = (checkExpectedOutputs)
                ? candidates.isEmpty() || matchedOutputs.equals(signature.getOutputs())
                : candidates.isEmpty();
        levelTimer.reset();
    } while (!stop);
    // Add the source and sink operations
    Source<E> sourceOp = new Source<E>(signature.getInputs());
    Sink<E> sinkOp = new Sink<E>(signature.getOutputs());
    leveledOps.add(0, Collections.<Operation<E>>singleton(sourceOp));
    leveledOps.add(leveledOps.size(), Collections.<Operation<E>>singleton(sinkOp));
    Stopwatch networkWatch = Stopwatch.createStarted();
    // Create a service match network with the discovered services
    DirectedAcyclicSMN<E, T> matchNetwork = new DirectedAcyclicSMN<E, T>(
            new HashLeveledServices<E>(leveledOps), this.matcher);
    log.info(" > Service match network computed in {}", networkWatch.stop().toString());
    log.info("Service Match Network created with {} levels (including source and sink) and {} operations.",
            leveledOps.size(), matchNetwork.listOperations().size());
    log.info("Forward Discovery done in {}", timer.toString());
    return matchNetwork;
}
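Note how levelTimer is reused across iterations: start() at the top of the loop, reset() at the bottom. reset() both zeroes the elapsed time and places the stopwatch in a stopped state, so the next start() does not throw. A minimal sketch of the same per-iteration pattern (levels and processLevel() are illustrative):

Stopwatch levelTimer = Stopwatch.createUnstarted();
for (int level = 0; level < levels; level++) {
    levelTimer.start();
    processLevel(level);                                    // hypothetical per-level work
    System.out.println("level " + level + " took " + levelTimer);
    levelTimer.reset();                                     // zeroes *and* stops, ready for the next start()
}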
From source file:com.palantir.common.base.PrefetchingBatchingVisitable.java
@Override
public <K extends Exception> boolean batchAccept(final int batchSize, AbortingVisitor<? super List<T>, K> v)
        throws K {
    final Queue<List<T>> queue = Queues.newArrayDeque();
    final Lock lock = new ReentrantLock();
    final Condition itemAvailable = lock.newCondition();
    final Condition spaceAvailable = lock.newCondition();
    final AtomicBoolean futureIsDone = new AtomicBoolean(false);
    final AtomicReference<Throwable> exception = new AtomicReference<Throwable>();
    final Stopwatch fetchTime = Stopwatch.createUnstarted();
    final Stopwatch fetchBlockedTime = Stopwatch.createUnstarted();
    final Stopwatch visitTime = Stopwatch.createUnstarted();
    final Stopwatch visitBlockedTime = Stopwatch.createUnstarted();
    Future<?> future = exec.submit(new Runnable() {
        @Override
        public void run() {
            try {
                fetchTime.start();
                delegate.batchAccept(batchSize, new AbortingVisitor<List<T>, InterruptedException>() {
                    @Override
                    public boolean visit(List<T> item) throws InterruptedException {
                        fetchTime.stop();
                        fetchBlockedTime.start();
                        lock.lock();
                        try {
                            while (queue.size() >= capacity) {
                                spaceAvailable.await();
                            }
                            fetchBlockedTime.stop();
                            queue.add(item);
                            itemAvailable.signalAll();
                        } finally {
                            lock.unlock();
                        }
                        fetchTime.start();
                        return true;
                    }
                });
                fetchTime.stop();
            } catch (InterruptedException e) {
                // shutting down
            } catch (Throwable t) {
                exception.set(t);
            } finally {
                if (fetchTime.isRunning()) {
                    fetchTime.stop();
                }
                if (fetchBlockedTime.isRunning()) {
                    fetchBlockedTime.stop();
                }
                lock.lock();
                try {
                    futureIsDone.set(true);
                    itemAvailable.signalAll();
                } finally {
                    lock.unlock();
                }
            }
        }
    });
    try {
        while (true) {
            List<T> batch;
            visitBlockedTime.start();
            lock.lock();
            try {
                while (queue.isEmpty()) {
                    if (futureIsDone.get()) {
                        if (exception.get() != null) {
                            throw Throwables.rewrapAndThrowUncheckedException(exception.get());
                        }
                        return true;
                    }
                    itemAvailable.await();
                }
                batch = queue.poll();
                spaceAvailable.signalAll();
            } finally {
                lock.unlock();
            }
            visitBlockedTime.stop();
            visitTime.start();
            boolean proceed = v.visit(batch);
            visitTime.stop();
            if (!proceed) {
                return false;
            }
        }
    } catch (InterruptedException e) {
        throw Throwables.rewrapAndThrowUncheckedException(e);
    } finally {
        log.debug("{} timings: fetch {}, fetchBlocked {}, visit {}, visitBlocked {}", name, fetchTime,
                fetchBlockedTime, visitTime, visitBlockedTime);
        future.cancel(true);
    }
}
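Four unstarted stopwatches partition the wall time of this producer/consumer pipeline into fetch, fetch-blocked, visit, and visit-blocked phases. Because stop() throws IllegalStateException on a stopwatch that is not running, the cleanup path guards each stop() with isRunning(). A minimal sketch of that defensive pattern, with fetchBatch() as a hypothetical unit of work:

Stopwatch fetchTime = Stopwatch.createUnstarted();
try {
    fetchTime.start();
    fetchBatch();                    // hypothetical work that may throw at any point
    fetchTime.stop();
} finally {
    if (fetchTime.isRunning()) {     // stop() on an already-stopped stopwatch throws IllegalStateException
        fetchTime.stop();
    }
}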
From source file:com.vmware.photon.controller.api.common.db.TransactionalInterceptor.java
@Override
public Object invoke(MethodInvocation invocation) throws Throwable {
    SessionFactory sessionFactory = sessionFactoryProvider.get();
    Session session;
    if (ManagedSessionContext.hasBind(sessionFactory)) {
        session = sessionFactory.getCurrentSession();
    } else {
        session = sessionFactory.openSession();
        ManagedSessionContext.bind(session);
    }
    Transaction transaction = session.getTransaction();
    if (transaction.isActive()) {
        return invocation.proceed();
    }
    Stopwatch stopwatch = Stopwatch.createUnstarted();
    try {
        logger.trace("beginning transaction: {}", transaction);
        stopwatch.start();
        transaction.begin();
        Object result = invocation.proceed();
        transaction.commit();
        stopwatch.stop();
        logger.debug("committed: {}", transaction);
        return result;
    } catch (Throwable t) {
        logger.debug("rolling back: {}", transaction, t);
        transaction.rollback();
        transactionExceptions.mark();
        throw t;
    } finally {
        final long elapsedTime = stopwatch.elapsed(TimeUnit.MILLISECONDS);
        transactions.update(elapsedTime, TimeUnit.MILLISECONDS);
        ManagedSessionContext.unbind(sessionFactory);
        if (session.isOpen()) {
            session.close();
        }
        final long transactionTimeWarningThresholdInMilliseconds = 2000L;
        if (elapsedTime > transactionTimeWarningThresholdInMilliseconds) {
            logger.warn("Transaction {} took {} milliseconds", transaction, elapsedTime);
        }
    }
}
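The stopwatch is read in the finally block with elapsed(TimeUnit.MILLISECONDS), which is legal whether the stopwatch is running, stopped, or was never started (an unstarted stopwatch reports zero), so the metric is recorded even when the transaction rolls back before stop() is reached. A minimal sketch of the time-and-warn shape, with doTransactionalWork() and the 2000 ms threshold as illustrative stand-ins:

Stopwatch stopwatch = Stopwatch.createUnstarted();
stopwatch.start();
try {
    doTransactionalWork();   // hypothetical; may throw before any stop() call
} finally {
    long elapsedMillis = stopwatch.elapsed(TimeUnit.MILLISECONDS); // valid even if never stopped
    if (elapsedMillis > 2000L) {
        System.err.println("slow operation: " + elapsedMillis + " ms");
    }
}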
From source file:org.apache.beam.sdk.io.synthetic.delay.SyntheticDelay.java
/** Keep cpu busy for {@code delayMillis} by calculating lots of hashes. */
private static void cpuDelay(long delayMillis) {
    // Note that the delay is enforced in terms of walltime. That implies this thread may not
    // keep the CPU busy if it gets preempted by other threads. There is more of a chance of this
    // occurring in a streaming pipeline, as there could be lots of threads running this. The loop
    // measures the cpu time spent on each iteration, so that these effects are somewhat minimized.
    long cpuMicros = delayMillis * 1000;
    Stopwatch timer = Stopwatch.createUnstarted();
    while (timer.elapsed(TimeUnit.MICROSECONDS) < cpuMicros) {
        // Find a long which hashes to HASH in the lowest MASK bits.
        // Values chosen to take roughly 1 ms on a typical workstation.
        timer.start();
        long p = INIT_PLAINTEXT;
        while (true) {
            long t = Hashing.murmur3_128().hashLong(p).asLong();
            if ((t & MASK) == (HASH & MASK)) {
                break;
            }
            p++;
        }
        timer.stop();
    }
}
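Two Stopwatch properties make this loop work: elapsed() is zero before the first start(), so the condition passes on the first iteration, and elapsed time accumulates across repeated start()/stop() pairs, so the loop sums only the measured spans. A minimal sketch of that accumulation, with burnSomeCpu() as a hypothetical unit of work:

Stopwatch timer = Stopwatch.createUnstarted();
// elapsed() is zero before the first start(), so the loop always runs at least once
while (timer.elapsed(TimeUnit.MILLISECONDS) < 10) {
    timer.start();
    burnSomeCpu();   // hypothetical unit of work
    timer.stop();    // elapsed time accumulates across start/stop pairs
}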
From source file:org.lenskit.util.monitor.TrackedJob.java
/**
 * Create a new tracked job.
 * @param type The job type code.
 * @param desc The description.
 */
public TrackedJob(String type, String desc) {
    parent = null;
    eventBus = new EventBus();
    this.type = type;
    this.description = desc;
    uuid = UUID.randomUUID();
    timer = Stopwatch.createUnstarted();
}
From source file:brooklyn.rest.filter.LoggingFilter.java
@Override
public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain)
        throws IOException, ServletException {
    HttpServletRequest httpRequest = (HttpServletRequest) request;
    HttpServletResponse httpResponse = (HttpServletResponse) response;
    String uri = httpRequest.getRequestURI();
    String rid = RequestTaggingFilter.getTag();
    boolean isInteresting = INTERESTING_METHODS.contains(httpRequest.getMethod()),
            shouldLog = (isInteresting && LOG.isDebugEnabled()) || LOG.isTraceEnabled(),
            requestErrored = false;
    Stopwatch timer = Stopwatch.createUnstarted();
    try {
        if (shouldLog) {
            String message = "{} starting request {} {}";
            Object[] args = new Object[] { rid, httpRequest.getMethod(), uri };
            if (isInteresting) {
                LOG.debug(message, args);
            } else {
                LOG.trace(message, args);
            }
        }
        timer.start();
        chain.doFilter(request, response);
    } catch (Throwable e) {
        requestErrored = true;
        LOG.warn("REST API request " + rid + " failed: " + e, e);
        // Propagate for handling by other filters
        throw Exceptions.propagate(e);
    } finally {
        timer.stop();
        // This logging must not happen before chain.doFilter, or FormMapProvider will not work as expected.
        // Getting the parameter map consumes the request body, and only resource methods using @FormParam
        // will work as expected.
        if (requestErrored || shouldLog) {
            boolean includeHeaders = requestErrored || httpResponse.getStatus() / 100 == 5
                    || LOG.isTraceEnabled();
            String message = getRequestCompletedMessage(includeHeaders, Duration.of(timer), rid,
                    httpRequest, httpResponse);
            if (requestErrored || isInteresting) {
                LOG.debug(message);
            } else {
                LOG.trace(message);
            }
        }
    }
}
From source file:io.druid.server.coordinator.DruidCoordinatorBalancerProfiler.java
public void bigProfiler() {
    Stopwatch watch = Stopwatch.createUnstarted();
    int numSegments = 55000;
    int numServers = 50;
    EasyMock.expect(manager.getAllRules()).andReturn(ImmutableMap.<String, List<Rule>>of("test", rules))
            .anyTimes();
    EasyMock.expect(manager.getRules(EasyMock.<String>anyObject())).andReturn(rules).anyTimes();
    EasyMock.expect(manager.getRulesWithDefault(EasyMock.<String>anyObject())).andReturn(rules).anyTimes();
    EasyMock.replay(manager);
    coordinator.moveSegment(EasyMock.<ImmutableDruidServer>anyObject(),
            EasyMock.<ImmutableDruidServer>anyObject(), EasyMock.<String>anyObject(),
            EasyMock.<LoadPeonCallback>anyObject());
    EasyMock.expectLastCall().anyTimes();
    EasyMock.replay(coordinator);
    List<DruidServer> serverList = Lists.newArrayList();
    Map<String, LoadQueuePeon> peonMap = Maps.newHashMap();
    List<ServerHolder> serverHolderList = Lists.newArrayList();
    Map<String, DataSegment> segmentMap = Maps.newHashMap();
    for (int i = 0; i < numSegments; i++) {
        segmentMap.put("segment" + i,
                new DataSegment("datasource" + i,
                        new Interval(new DateTime("2012-01-01"), (new DateTime("2012-01-01")).plusHours(1)),
                        (new DateTime("2012-03-01")).toString(), Maps.<String, Object>newHashMap(),
                        Lists.<String>newArrayList(), Lists.<String>newArrayList(), new NoneShardSpec(),
                        0, 4L));
    }
    for (int i = 0; i < numServers; i++) {
        ImmutableDruidServer server = EasyMock.createMock(ImmutableDruidServer.class);
        EasyMock.expect(server.getMetadata()).andReturn(null).anyTimes();
        EasyMock.expect(server.getCurrSize()).andReturn(30L).atLeastOnce();
        EasyMock.expect(server.getMaxSize()).andReturn(100L).atLeastOnce();
        EasyMock.expect(server.getTier()).andReturn("normal").anyTimes();
        EasyMock.expect(server.getName()).andReturn(Integer.toString(i)).atLeastOnce();
        EasyMock.expect(server.getHost()).andReturn(Integer.toString(i)).anyTimes();
        if (i == 0) {
            EasyMock.expect(server.getSegments()).andReturn(segmentMap).anyTimes();
        } else {
            EasyMock.expect(server.getSegments()).andReturn(new HashMap<String, DataSegment>()).anyTimes();
        }
        EasyMock.expect(server.getSegment(EasyMock.<String>anyObject())).andReturn(null).anyTimes();
        EasyMock.replay(server);
        LoadQueuePeon peon = new LoadQueuePeonTester();
        peonMap.put(Integer.toString(i), peon);
        serverHolderList.add(new ServerHolder(server, peon));
    }
    DruidCoordinatorRuntimeParams params = DruidCoordinatorRuntimeParams.newBuilder()
            .withDruidCluster(new DruidCluster(ImmutableMap.<String, MinMaxPriorityQueue<ServerHolder>>of(
                    "normal",
                    MinMaxPriorityQueue.orderedBy(DruidCoordinatorBalancerTester.percentUsedComparator)
                            .create(serverHolderList))))
            .withLoadManagementPeons(peonMap)
            .withAvailableSegments(segmentMap.values())
            .withDynamicConfigs(new CoordinatorDynamicConfig.Builder()
                    .withMaxSegmentsToMove(MAX_SEGMENTS_TO_MOVE).withReplicantLifetime(500)
                    .withReplicationThrottleLimit(5).build())
            .withBalancerReferenceTimestamp(new DateTime("2013-01-01"))
            .withEmitter(emitter)
            .withDatabaseRuleManager(manager)
            .withReplicationManager(new ReplicationThrottler(2, 500))
            .withSegmentReplicantLookup(SegmentReplicantLookup.make(new DruidCluster(
                    ImmutableMap.<String, MinMaxPriorityQueue<ServerHolder>>of("normal",
                            MinMaxPriorityQueue
                                    .orderedBy(DruidCoordinatorBalancerTester.percentUsedComparator)
                                    .create(serverHolderList)))))
            .build();
    DruidCoordinatorBalancerTester tester = new DruidCoordinatorBalancerTester(coordinator);
    DruidCoordinatorRuleRunner runner = new DruidCoordinatorRuleRunner(coordinator);
    watch.start();
    DruidCoordinatorRuntimeParams balanceParams = tester.run(params);
    DruidCoordinatorRuntimeParams assignParams = runner.run(params);
    System.out.println(watch.stop());
}
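The watch is created unstarted so that the extensive EasyMock setup is excluded from the measurement; it is started only immediately before the balancer and rule runner execute. Note that stop() returns the Stopwatch itself, which is why System.out.println(watch.stop()) both halts the watch and prints its human-readable toString(). A minimal sketch, with runBenchmark() as a hypothetical stand-in:

Stopwatch watch = Stopwatch.createUnstarted();
// ... expensive setup excluded from the measurement ...
watch.start();
runBenchmark();                    // hypothetical code under test
System.out.println(watch.stop());  // stop() returns this; toString() prints e.g. "1.503 s"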
From source file:org.apache.eagle.alert.coordinator.ExclusiveExecutor.java
public void execute(String path, final Runnable r, int timeoutMillis) throws TimeoutException {
    final AtomicBoolean executed = new AtomicBoolean(false);
    Stopwatch watch = Stopwatch.createUnstarted();
    watch.start();
    LeaderSelectorListener listener = new LeaderSelectorListenerAdapter() {
        @Override
        public void takeLeadership(CuratorFramework client) throws Exception {
            // This callback will get called when you are the leader.
            // Do whatever leader work you need to, and only exit
            // this method when you want to relinquish leadership.
            LOG.info("this is leader node right now..");
            executed.set(true);
            try {
                r.run();
            } catch (Throwable t) {
                LOG.warn("failed to run exclusive executor", t);
            }
            LOG.info("leader node executed done!..");
        }

        @Override
        public void stateChanged(CuratorFramework client, ConnectionState newState) {
            LOG.info(String.format("leader selector state change listener, new state: %s",
                    newState.toString()));
        }
    };
    selector = new LeaderSelector(client, path, listener);
    // selector.autoRequeue(); // not required, but this is behavior that you will probably expect
    selector.start();
    // Wait for the given timeout
    while (watch.elapsed(TimeUnit.MILLISECONDS) < timeoutMillis) { // about 3 minutes waiting
        if (!executed.get()) {
            try {
                Thread.sleep(ACQUIRE_LOCK_WAIT_INTERVAL_MS);
            } catch (InterruptedException e) {
                // ignored
            }
            continue;
        } else {
            break;
        }
    }
    watch.stop();
    if (!executed.get()) {
        throw new TimeoutException(String.format(
                "Get exclusive lock for operation on path %s failed due to wait too much time: %d ms", path,
                watch.elapsed(TimeUnit.MILLISECONDS)));
    }
    LOG.info("Exclusive operation done with execution time (lock plus operation) {} ms !",
            watch.elapsed(TimeUnit.MILLISECONDS));
}
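Here a running stopwatch drives a polling deadline: elapsed() is compared against the timeout on every pass, and the same watch is read again after stop() to report the total wait. A minimal sketch of the deadline loop (conditionMet() and the 100 ms poll interval are illustrative):

void awaitCondition(long timeoutMillis) throws InterruptedException {
    Stopwatch watch = Stopwatch.createStarted();
    while (watch.elapsed(TimeUnit.MILLISECONDS) < timeoutMillis) {
        if (conditionMet()) {   // hypothetical condition to wait for
            break;
        }
        Thread.sleep(100);      // poll interval
    }
    watch.stop();
    System.out.println("waited " + watch.elapsed(TimeUnit.MILLISECONDS) + " ms");
}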
From source file:com.google.pubsub.clients.kafka.KafkaPublisherTask.java
@Override
public void run() {
    Stopwatch stopwatch = Stopwatch.createUnstarted();
    Callback callback = (metadata, exception) -> {
        if (exception != null) {
            log.error(exception.getMessage(), exception);
            return;
        }
        addNumberOfMessages(1);
        metricsHandler.recordLatency(stopwatch.elapsed(TimeUnit.MILLISECONDS));
    };
    stopwatch.start();
    for (int i = 0; i < batchSize; i++) {
        publisher.send(new ProducerRecord<>(topic, null, System.currentTimeMillis(), null, payload),
                callback);
    }
    publisher.flush();
    stopwatch.stop();
}
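Each producer callback reads elapsed() from the shared stopwatch when its acknowledgement arrives, recording the time since the batch began; after stop(), elapsed() keeps returning the frozen total, so late callbacks see the batch duration. Note that Guava documents Stopwatch as not thread-safe, so reading it from the producer's I/O thread, as here, is a benign-race shortcut rather than a guaranteed-safe pattern. A minimal sketch with a hypothetical sendAsync/recordLatency API:

Stopwatch stopwatch = Stopwatch.createUnstarted();
stopwatch.start();
for (int i = 0; i < batchSize; i++) {
    // hypothetical async send; the callback records latency relative to batch start
    sendAsync(payload, () -> recordLatency(stopwatch.elapsed(TimeUnit.MILLISECONDS)));
}
stopwatch.stop(); // late callbacks now read the frozen batch total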