List of usage examples for com.google.common.base Stopwatch elapsed
@CheckReturnValue public long elapsed(TimeUnit desiredUnit)
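Before the project examples below, here is a minimal, self-contained sketch of typical usage (the class name and the sleep standing in for real work are illustrative; assumes Guava 15+ where Stopwatch.createStarted() is available):

import java.util.concurrent.TimeUnit;

import com.google.common.base.Stopwatch;

public class StopwatchElapsedExample {
    public static void main(String[] args) throws InterruptedException {
        Stopwatch stopwatch = Stopwatch.createStarted();
        Thread.sleep(250); // stand-in for the work being timed
        stopwatch.stop();
        // elapsed(TimeUnit) converts the elapsed duration to the requested unit, truncating precision
        System.out.println("Took " + stopwatch.elapsed(TimeUnit.MILLISECONDS) + " ms");
    }
}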
From source file:processing.MalletCalculator.java
public static List<Map<Integer, Double>> startLdaCreation(BookmarkReader reader, int sampleSize,
        boolean sorting, int numTopics, boolean userBased, boolean resBased, boolean topicCreation,
        boolean smoothing) {
    timeString = "";
    int size = reader.getUserLines().size();
    int trainSize = size - sampleSize;
    Stopwatch timer = new Stopwatch();
    timer.start();
    MalletCalculator userCalc = null;
    List<Map<Integer, Integer>> userMaps = null;
    //List<Double> userDenoms = null;
    if (userBased) {
        userMaps = Utilities.getUserMaps(reader.getUserLines().subList(0, trainSize));
        userCalc = new MalletCalculator(userMaps, numTopics);
        userCalc.predictValuesProbs();
        //userDenoms = getDenoms(userPredictionValues);
        System.out.println("User-Training finished");
    }
    MalletCalculator resCalc = null;
    List<Map<Integer, Integer>> resMaps = null;
    //List<Double> resDenoms = null;
    if (resBased) {
        resMaps = Utilities.getResMaps(reader.getUserLines().subList(0, trainSize));
        resCalc = new MalletCalculator(resMaps, numTopics);
        resCalc.predictValuesProbs();
        //resDenoms = getDenoms(resPredictionValues);
        System.out.println("Res-Training finished");
    }
    List<Map<Integer, Double>> results = new ArrayList<Map<Integer, Double>>();
    if (trainSize == size) {
        trainSize = 0;
    }
    timer.stop();
    long trainingTime = timer.elapsed(TimeUnit.MILLISECONDS);

    timer = new Stopwatch();
    timer.start();
    for (int i = trainSize; i < size; i++) { // the test set
        UserData data = reader.getUserLines().get(i);
        int userID = data.getUserID();
        int resID = data.getWikiID();
        //Map<Integer, Integer> userMap = null;
        //if (userBased && userMaps != null && userID < userMaps.size()) {
        //    userMap = userMaps.get(userID);
        //}
        //Map<Integer, Integer> resMap = null;
        //if (resBased && resMaps != null && resID < resMaps.size()) {
        //    resMap = resMaps.get(resID);
        //}
        double userTagCount = 0.0; //Utilities.getMapCount(userMap);
        double resTagCount = 0.0; //Utilities.getMapCount(resMap);
        /*
        double userDenomVal = 0.0;
        if (userDenoms != null && userID < userDenoms.size()) {
            userDenomVal = userDenoms.get(userID);
        }
        double resDenomVal = 0.0;
        if (resDenoms != null && resID < resDenoms.size()) {
            resDenomVal = resDenoms.get(resID);
        }
        */
        Map<Integer, Double> userPredMap = null;
        if (userCalc != null) {
            userPredMap = userCalc.getValueProbsForID(userID, topicCreation);
        }
        Map<Integer, Double> resPredMap = null;
        if (resCalc != null) {
            resPredMap = resCalc.getValueProbsForID(resID, topicCreation);
        }
        Map<Integer, Double> map = getRankedTagList(reader, userPredMap, userTagCount, resPredMap,
                resTagCount, sorting, smoothing, topicCreation);
        results.add(map);
    }
    timer.stop();
    long testTime = timer.elapsed(TimeUnit.MILLISECONDS);

    timeString += ("Full training time: " + trainingTime + "\n");
    timeString += ("Full test time: " + testTime + "\n");
    timeString += ("Average test time: " + testTime / (double) sampleSize) + "\n";
    timeString += ("Total time: " + (trainingTime + testTime) + "\n");
    return results;
}
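Note: the example above (like demos.AsynchronousInsert further down) creates its timers with the pre-Guava-15 Stopwatch constructor, which has since been deprecated in favor of static factories. A minimal sketch of the equivalent timer setup on a current Guava (the helper class and method names are illustrative):

import java.util.concurrent.TimeUnit;

import com.google.common.base.Stopwatch;

class TimerMigrationSketch {
    // replaces the deprecated pattern: Stopwatch timer = new Stopwatch(); timer.start();
    static long timeMillis(Runnable work) {
        Stopwatch timer = Stopwatch.createStarted();
        work.run(); // the code being measured
        timer.stop();
        return timer.elapsed(TimeUnit.MILLISECONDS);
    }
}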
From source file:org.apache.jackrabbit.oak.scalability.suites.ScalabilityAbstractSuite.java
/**
 * Runs the iteration of the benchmarks added.
 *
 * @param context the execution context
 * @throws Exception
 */
private void runIteration(ExecutionContext context) throws Exception {
    Preconditions.checkArgument(benchmarks != null && !benchmarks.isEmpty(), "No Benchmarks configured");
    for (String key : benchmarks.keySet()) {
        ScalabilityBenchmark benchmark = benchmarks.get(key);
        if (result.getBenchmarkStatistics(benchmark) == null) {
            result.addBenchmarkStatistics(benchmark, new SynchronizedDescriptiveStatistics());
        }
        Stopwatch watch = Stopwatch.createStarted();
        executeBenchmark(benchmark, context);
        watch.stop();
        result.getBenchmarkStatistics(benchmark).addValue(watch.elapsed(TimeUnit.MILLISECONDS));
        if (LOG.isDebugEnabled()) {
            LOG.debug("Execution time for " + benchmark + "-" + watch.elapsed(TimeUnit.MILLISECONDS));
        }
    }
}
From source file:org.glowroot.central.DownstreamServiceImpl.java
private AgentResponse runOnCluster(String agentId, CentralRequest centralRequest) throws Exception {
    int timeoutSeconds;
    switch (centralRequest.getMessageCase()) {
    case HEADER_REQUEST:
    case ENTRIES_REQUEST:
    case MAIN_THREAD_PROFILE_REQUEST:
    case AUX_THREAD_PROFILE_REQUEST:
    case FULL_TRACE_REQUEST:
        timeoutSeconds = 5;
        break;
    case HEAP_DUMP_REQUEST:
        timeoutSeconds = 300;
        break;
    default:
        timeoutSeconds = 60;
    }
    // retry up to 5 seconds on shutting-down response to give agent time to reconnect to
    // another cluster node
    Stopwatch stopwatch = Stopwatch.createStarted();
    while (stopwatch.elapsed(SECONDS) < 5) {
        java.util.Optional<AgentResult> optional = connectedAgents.execute(agentId, timeoutSeconds,
                new SendDownstreamFunction(centralRequest, timeoutSeconds));
        if (!optional.isPresent()) {
            throw new AgentNotConnectedException();
        }
        AgentResult result = optional.get();
        Optional<AgentResponse> value = result.value();
        if (value.isPresent()) {
            AgentResponse response = value.get();
            if (response.getMessageCase() == AgentResponse.MessageCase.UNKNOWN_REQUEST_RESPONSE) {
                throw new AgentUnsupportedOperationException();
            }
            if (response.getMessageCase() == AgentResponse.MessageCase.EXCEPTION_RESPONSE) {
                throw new AgentException();
            }
            return response;
        } else if (result.timeout()) {
            throw new TimeoutException();
        } else if (result.interrupted()) {
            // this should not happen
            throw new RuntimeException(
                    "Glowroot central thread was interrupted while waiting for agent response");
        }
        // only other case is shutting-down response
        checkState(result.shuttingDown());
        MILLISECONDS.sleep(100);
    }
    // received shutting-down response for 5+ seconds
    throw new AgentNotConnectedException();
}
From source file:com.palantir.atlasdb.keyvalue.impl.ProfilingKeyValueService.java
@Override
public void addGarbageCollectionSentinelValues(String tableName, Set<Cell> cells) {
    if (log.isTraceEnabled()) {
        Stopwatch stopwatch = Stopwatch.createStarted();
        delegate.addGarbageCollectionSentinelValues(tableName, cells);
        log.trace("Call to KVS.addGarbageCollectionSentinelValues on table {} over {} cells took {} ms.",
                tableName, cells.size(), stopwatch.elapsed(TimeUnit.MILLISECONDS));
    } else {
        delegate.addGarbageCollectionSentinelValues(tableName, cells);
    }
}
From source file:ch.ge.ve.protopoc.service.protocol.DefaultAuthority.java
@Override
public void startPartialDecryption() {
    log.info("Authority " + j + " starting decryption");
    ShufflesAndProofs shufflesAndProofs = bulletinBoardService.getShufflesAndProofs();
    List<Encryption> encryptions = mixingAuthorityAlgorithms.getEncryptions(ballotEntries,
            confirmationEntries);
    List<ShuffleProof> shuffleProofs = shufflesAndProofs.getShuffleProofs();
    List<List<Encryption>> shuffles = shufflesAndProofs.getShuffles();

    Stopwatch checkShuffleWatch = Stopwatch.createStarted();
    if (!decryptionAuthorityAlgorithms.checkShuffleProofs(shuffleProofs, encryptions, shuffles,
            systemPublicKey, j)) {
        throw new InvalidShuffleProofRuntimeException("At least one shuffle proof was invalid");
    }
    checkShuffleWatch.stop();
    perfLog.info(String.format("Authority %d : checked shuffle proof in %dms", j,
            checkShuffleWatch.elapsed(TimeUnit.MILLISECONDS)));

    BigInteger secretKey = myPrivateKey.getPrivateKey();
    List<Encryption> finalShuffle = shuffles.get(publicParameters.getS() - 1);
    Stopwatch decryptionWatch = Stopwatch.createStarted();
    List<BigInteger> partialDecryptions = decryptionAuthorityAlgorithms.getPartialDecryptions(finalShuffle,
            secretKey);
    decryptionWatch.stop();
    perfLog.info(String.format("Authority %d : decrypted in %dms", j,
            decryptionWatch.elapsed(TimeUnit.MILLISECONDS)));

    BigInteger publicKey = myPublicKey.getPublicKey();
    Stopwatch decryptionProofWatch = Stopwatch.createStarted();
    DecryptionProof decryptionProof = decryptionAuthorityAlgorithms.genDecryptionProof(secretKey, publicKey,
            finalShuffle, partialDecryptions);
    decryptionProofWatch.stop();
    perfLog.info(String.format("Authority %d : decryption proof in %dms", j,
            decryptionProofWatch.elapsed(TimeUnit.MILLISECONDS)));

    bulletinBoardService.publishPartialDecryptionAndProof(j, partialDecryptions, decryptionProof);
}
From source file:demos.AsynchronousInsert.java
@Override
public void run() {
    try {
        logger.info("Preparing to insert metric data points");
        Cluster cluster = Cluster.builder().addContactPoint("127.0.0.1").build();
        Session session = cluster.connect("demo");
        PreparedStatement insert = session
                .prepare("insert into metric_data (metric_id, time, value) values (?, ?, ?)");
        Random random = new Random();
        DateTime time = DateTime.now().minusYears(1);
        final CountDownLatch latch = new CountDownLatch(NUM_INSERTS);
        FutureCallback<ResultSet> callback = new FutureCallback<ResultSet>() {
            @Override
            public void onSuccess(ResultSet result) {
                latch.countDown();
            }

            @Override
            public void onFailure(Throwable t) {
                logger.warn("There was an error inserting data", t);
                latch.countDown();
            }
        };
        Stopwatch stopwatch = new Stopwatch().start();
        for (int i = 0; i < NUM_INSERTS; ++i) {
            String metricId = "metric-" + Math.abs(random.nextInt() % NUM_METRICS);
            double value = random.nextDouble();
            ResultSetFuture future = session.executeAsync(insert.bind(metricId, time.toDate(), value));
            time = time.plusSeconds(10);
            Futures.addCallback(future, callback);
        }
        latch.await();
        stopwatch.stop();
        logger.info("Finished inserting {} data points in {} ms", NUM_INSERTS,
                stopwatch.elapsed(TimeUnit.MILLISECONDS));
    } catch (InterruptedException e) {
        logger.info("There was an interrupt while waiting for inserts to complete");
    }
}
From source file:com.cloudera.director.aws.ec2.ebs.EBSAllocator.java
/**
 * Waits for the volumes in a list of {@code InstanceEbsVolumes} to reach an available state.
 * Returns an updated list of {@code InstanceEbsVolumes} with the volumes that became
 * available marked as AVAILABLE and volumes that failed or timed out marked as FAILED.
 *
 * @param createdInstanceVolumes list of instances with their created EBS volumes
 * @return updated list of instance EBS volumes
 */
public List<InstanceEbsVolumes> waitUntilVolumesAvailable(List<InstanceEbsVolumes> createdInstanceVolumes)
        throws InterruptedException {
    Set<String> volumesToCheck = getAllVolumeIdsWithStatus(createdInstanceVolumes,
            InstanceEbsVolumes.Status.CREATED);
    int numRequestedVolumes = volumesToCheck.size();
    Set<String> volumesAvailable = Sets.newHashSetWithExpectedSize(numRequestedVolumes);

    if (numRequestedVolumes > 0) {
        LOG.info("Waiting for a maximum of {} seconds for volumes to become available",
                availableTimeoutSeconds);
        Stopwatch watch = Stopwatch.createStarted();
        while (watch.elapsed(TimeUnit.SECONDS) < availableTimeoutSeconds) {
            DescribeVolumesRequest volumeRequest = new DescribeVolumesRequest()
                    .withVolumeIds(volumesToCheck);
            try {
                List<Volume> volumes = client.describeVolumes(volumeRequest).getVolumes();
                for (Volume volume : volumes) {
                    String id = volume.getVolumeId();
                    VolumeState state = VolumeState.fromValue(volume.getState());
                    switch (state) {
                    case Creating:
                        break;
                    case Available:
                        volumesToCheck.remove(id);
                        volumesAvailable.add(id);
                        break;
                    case Error:
                        // TODO log why the volume failed which may need a separate api call
                        volumesToCheck.remove(id);
                        break;
                    default:
                        String err = String.format("A requested volume went into an unexpected state %s "
                                + "while waiting for volume to become available", state);
                        // err is already fully formatted; the original wrapped it in a redundant
                        // second String.format(err, state)
                        throw new IllegalStateException(err);
                    }
                }
                if (volumesToCheck.isEmpty()) {
                    break;
                }
            } catch (AmazonServiceException ex) {
                // ignore exception when volume isn't found, newly created volumes may not be found right away
                if (ex.getErrorCode().equals("InvalidVolume.NotFound")) {
                    LOG.info("Requested volume(s) not yet found");
                } else {
                    throw AWSExceptions.propagate(ex);
                }
            }
            LOG.info("Waiting on {} out of {} volumes to reach a final state, next check in {} seconds",
                    volumesToCheck.size(), numRequestedVolumes, WAIT_UNTIL_AVAILABLE_INTERVAL_SECONDS);
            TimeUnit.SECONDS.sleep(WAIT_UNTIL_AVAILABLE_INTERVAL_SECONDS);
        }
        if (volumesToCheck.size() > 0) {
            LOG.error("Timed out while waiting for volumes to be created, {} out of {} volumes became available",
                    volumesAvailable.size(), numRequestedVolumes);
        }
    } else {
        LOG.info("Skipping wait for availability because no EBS volumes were created");
    }

    // Update the status of each volume to AVAILABLE or FAILED based on the result
    List<InstanceEbsVolumes> updated = Lists.newArrayList();
    for (InstanceEbsVolumes instanceEbsVolumes : createdInstanceVolumes) {
        Map<String, InstanceEbsVolumes.Status> updatedVolumes = Maps.newHashMap();
        for (String volumeId : instanceEbsVolumes.getVolumeStatuses().keySet()) {
            InstanceEbsVolumes.Status updatedStatus = volumesAvailable.contains(volumeId)
                    ? InstanceEbsVolumes.Status.AVAILABLE
                    : InstanceEbsVolumes.Status.FAILED;
            updatedVolumes.put(volumeId, updatedStatus);
        }
        updated.add(new InstanceEbsVolumes(instanceEbsVolumes.getVirtualInstanceId(),
                instanceEbsVolumes.getEc2InstanceId(), updatedVolumes));
    }
    return updated;
}
From source file:org.glowroot.agent.it.harness.impl.JavaagentContainer.java
public JavaagentContainer(@Nullable File testDir, boolean embedded, List<String> extraJvmArgs)
        throws Exception {
    if (testDir == null) {
        this.testDir = TempDirs.createTempDir("glowroot-test-dir");
        deleteTestDirOnClose = true;
    } else {
        this.testDir = testDir;
        deleteTestDirOnClose = false;
    }
    // need to start heartbeat socket listener before spawning process
    heartbeatListenerSocket = new ServerSocket(0);
    heartbeatListenerExecutor = Executors.newSingleThreadExecutor();
    heartbeatListenerExecutor.execute(new Runnable() {
        @Override
        public void run() {
            try {
                // TODO report checker framework issue that occurs without checkNotNull
                Socket socket = checkNotNull(heartbeatListenerSocket).accept();
                InputStream socketIn = socket.getInputStream();
                ByteStreams.exhaust(socketIn);
            } catch (IOException e) {
                logger.error(e.getMessage(), e);
            }
        }
    });
    boolean pointingToCentral = false;
    for (String extraJvmArg : extraJvmArgs) {
        if (extraJvmArg.startsWith("-Dglowroot.collector.address=")) {
            pointingToCentral = true;
            break;
        }
    }
    int collectorPort;
    if (embedded || pointingToCentral) {
        collectorPort = 0;
        traceCollector = null;
        server = null;
    } else {
        collectorPort = LocalContainer.getAvailablePort();
        traceCollector = new TraceCollector();
        server = new GrpcServerWrapper(traceCollector, collectorPort);
    }
    int javaagentServicePort = LocalContainer.getAvailablePort();
    List<String> command = buildCommand(heartbeatListenerSocket.getLocalPort(), collectorPort,
            javaagentServicePort, this.testDir, extraJvmArgs);
    ProcessBuilder processBuilder = new ProcessBuilder(command);
    processBuilder.redirectErrorStream(true);
    Process process = processBuilder.start();
    consolePipeExecutor = Executors.newSingleThreadExecutor();
    InputStream in = process.getInputStream();
    // process.getInputStream() only returns null if ProcessBuilder.redirectOutput() is used
    // to redirect output to a file
    checkNotNull(in);
    consoleOutputPipe = new ConsoleOutputPipe(in, System.out);
    consolePipeFuture = consolePipeExecutor.submit(consoleOutputPipe);
    this.process = process;
    eventLoopGroup = EventLoopGroups.create("Glowroot-IT-Harness*-GRPC-Worker-ELG");
    executor = Executors.newCachedThreadPool(new ThreadFactoryBuilder().setDaemon(true)
            .setNameFormat("Glowroot-IT-Harness*-GRPC-Executor-%d").build());
    channel = NettyChannelBuilder.forAddress("localhost", javaagentServicePort)
            .eventLoopGroup(eventLoopGroup).executor(executor)
            .negotiationType(NegotiationType.PLAINTEXT).build();
    Stopwatch stopwatch = Stopwatch.createStarted();
    // this can take a while on slow travis ci build machines
    while (stopwatch.elapsed(SECONDS) < 30) {
        try {
            JavaagentServiceBlockingStub javaagentService = JavaagentServiceGrpc.newBlockingStub(channel)
                    .withCompression("gzip");
            javaagentService.ping(Void.getDefaultInstance());
            break;
        } catch (Exception e) {
            logger.debug(e.getMessage(), e);
        }
        MILLISECONDS.sleep(100);
    }
    javaagentService = JavaagentServiceGrpc.newBlockingStub(channel).withCompression("gzip");
    if (server == null) {
        configService = null;
        javaagentService.initConfigForTests(Void.getDefaultInstance());
    } else {
        configService = new ConfigServiceImpl(server, true);
        // need to set through config service so config service can keep track of changes,
        // otherwise it will clobber slow threshold value on next update through config service
        configService.resetConfigForTests();
    }
    shutdownHook = new ShutdownHookThread(javaagentService);
    // unfortunately, ctrl-c during maven test will kill the maven process, but won't kill the
    // forked surefire jvm where the tests are being run
    // (http://jira.codehaus.org/browse/SUREFIRE-413), and so this hook won't get triggered by
    // ctrl-c while running tests under maven
    Runtime.getRuntime().addShutdownHook(shutdownHook);
}
From source file:org.apache.eagle.alert.coordinator.Coordinator.java
public synchronized ScheduleState schedule(ScheduleOption option) throws TimeoutException {
    ExclusiveExecutor executor = new ExclusiveExecutor(zkConfig);
    AtomicReference<ScheduleState> reference = new AtomicReference<>();
    try {
        executor.execute(GREEDY_SCHEDULER_ZK_PATH, () -> {
            ScheduleState state = null;
            Stopwatch watch = Stopwatch.createStarted();
            IScheduleContext context = new ScheduleContextBuilder(config, client).buildContext();
            TopologyMgmtService mgmtService = new TopologyMgmtService();
            IPolicyScheduler scheduler = PolicySchedulerFactory.createScheduler();
            scheduler.init(context, mgmtService);
            state = scheduler.schedule(option);
            long scheduleTime = watch.elapsed(TimeUnit.MILLISECONDS);
            state.setScheduleTimeMillis((int) scheduleTime); // hardcode to integer
            watch.reset();
            watch.start();
            // persist & notify
            try (ConfigBusProducer producer = new ConfigBusProducer(ZKConfigBuilder.getZKConfig(config))) {
                postSchedule(client, state, producer);
            }
            watch.stop();
            long postTime = watch.elapsed(TimeUnit.MILLISECONDS);
            LOG.info("Schedule result, schedule time {} ms, post schedule time {} ms !", scheduleTime,
                    postTime);
            reference.set(state);
            currentState = state;
        });
    } catch (TimeoutException e1) {
        LOG.error("time out when schedule", e1);
        throw e1;
    } finally {
        try {
            executor.close();
        } catch (IOException e) {
            LOG.error("Exception when close exclusive executor, log and ignore!", e);
        }
    }
    return reference.get();
}
From source file:org.lenskit.eval.traintest.ExperimentJob.java
@Override
protected void compute() {
    ExperimentOutputLayout layout = experiment.getOutputLayout();
    TableWriter globalOutput = layout.prefixTable(experiment.getGlobalOutput(), dataSet, algorithm);
    TableWriter userOutput = layout.prefixTable(experiment.getUserOutput(), dataSet, algorithm);
    RowBuilder outputRow = globalOutput.getLayout().newRowBuilder();

    logger.info("Building {} on {}", algorithm, dataSet);
    Stopwatch buildTimer = Stopwatch.createStarted();
    try (LenskitRecommender rec = buildRecommender()) {
        buildTimer.stop();
        logger.info("Built {} in {}", algorithm.getName(), buildTimer);
        logger.info("Measuring {} on {}", algorithm.getName(), dataSet.getName());
        RowBuilder userRow = userOutput != null ? userOutput.getLayout().newRowBuilder() : null;

        Stopwatch testTimer = Stopwatch.createStarted();
        List<ConditionEvaluator> accumulators = Lists.newArrayList();
        for (EvalTask task : experiment.getTasks()) {
            ConditionEvaluator ce = task.createConditionEvaluator(algorithm, dataSet, rec);
            if (ce != null) {
                accumulators.add(ce);
            } else {
                logger.warn("Could not instantiate task {} for algorithm {} on data set {}", task,
                        algorithm, dataSet);
            }
        }
        LongSet testUsers = dataSet.getTestData().getUserDAO().getUserIds();
        UserEventDAO trainEvents = dataSet.getTrainingData().getUserEventDAO();
        UserEventDAO userEvents = dataSet.getTestData().getUserEventDAO();
        final NumberFormat pctFormat = NumberFormat.getPercentInstance();
        pctFormat.setMaximumFractionDigits(2);
        pctFormat.setMinimumFractionDigits(2);
        final int nusers = testUsers.size();
        logger.info("Testing {} on {} ({} users)", algorithm, dataSet, nusers);
        ProgressLogger progress = ProgressLogger.create(logger).setCount(nusers).setLabel("testing users")
                .start();
        for (LongIterator iter = testUsers.iterator(); iter.hasNext();) {
            if (Thread.interrupted()) {
                throw new EvaluationException("eval job interrupted");
            }
            long uid = iter.nextLong();
            if (userRow != null) {
                userRow.add("User", uid);
            }
            UserHistory<Event> trainData = trainEvents.getEventsForUser(uid);
            if (trainData == null) {
                trainData = History.forUser(uid);
            }
            UserHistory<Event> userData = userEvents.getEventsForUser(uid);
            TestUser user = new TestUser(trainData, userData);

            Stopwatch userTimer = Stopwatch.createStarted();
            for (ConditionEvaluator eval : accumulators) {
                Map<String, Object> ures = eval.measureUser(user);
                if (userRow != null) {
                    userRow.addAll(ures);
                }
            }
            userTimer.stop();

            if (userRow != null) {
                userRow.add("TestTime", userTimer.elapsed(TimeUnit.MILLISECONDS) * 0.001);
                assert userOutput != null;
                try {
                    userOutput.writeRow(userRow.buildList());
                } catch (IOException e) {
                    throw new EvaluationException("error writing user row", e);
                }
                userRow.clear();
            }
            progress.advance();
        }
        progress.finish();
        testTimer.stop();
        logger.info("Tested {} in {}", algorithm.getName(), testTimer);
        outputRow.add("BuildTime", buildTimer.elapsed(TimeUnit.MILLISECONDS) * 0.001);
        outputRow.add("TestTime", testTimer.elapsed(TimeUnit.MILLISECONDS) * 0.001);
        for (ConditionEvaluator eval : accumulators) {
            outputRow.addAll(eval.finish());
        }
    } catch (UncheckedInterruptException ex) {
        logger.info("evaluation interrupted");
        throw ex;
    } catch (Throwable th) {
        logger.error("Error evaluating " + algorithm + " on " + dataSet, th);
        throw th;
    }
    try {
        globalOutput.writeRow(outputRow.buildList());
    } catch (IOException e) {
        throw new EvaluationException("error writing output row", e);
    }
}