Usage examples for com.google.common.base.Stopwatch.createUnstarted()
@CheckReturnValue public static Stopwatch createUnstarted()
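Before the real-world examples below, here is a minimal sketch of the typical pattern (not taken from any of the projects that follow; the class name and output are illustrative only): createUnstarted() returns a Stopwatch that is allocated up front and started later, which is useful when construction and timing happen in different places.

import com.google.common.base.Stopwatch;
import java.util.concurrent.TimeUnit;

public class StopwatchDemo {
    public static void main(String[] args) throws InterruptedException {
        // Allocate the stopwatch now; start it only when the timed work begins.
        Stopwatch watch = Stopwatch.createUnstarted();
        watch.start();
        Thread.sleep(50); // stand-in for the work being measured
        watch.stop();
        // elapsed() reads the accumulated time without resetting it
        System.out.println("took " + watch.elapsed(TimeUnit.MILLISECONDS) + " ms");
    }
}

The same instance can be reused across iterations with reset().start(), as several of the examples below do.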
From source file:com.persinity.ndt.datamutator.DataMutator.java
/**
 * @param configProps
 * @param configSource
 * @param dbConfigProps
 * @param dbConfigSource
 * @param quite
 */
public DataMutator(final Properties configProps, final String configSource, final Properties dbConfigProps,
        final String dbConfigSource, final boolean quite) {
    config = new DataMutatorConfig(configProps, configSource);
    log.info(config.toString());
    try {
        entityFactory = (EntityFactory) config.getEntityFactoryClass().newInstance();
    } catch (Exception e) {
        throw new RuntimeException(e);
    }
    entityPoolUtil = new EntityPoolUtil(new EntityPool(this.config.getRatio()), new RelDbTypeFactory());
    entityFactory.init(dbConfigProps, dbConfigSource, entityPoolUtil);
    loadExecutors = new ArrayList<>();
    threads = new ArrayList<>();
    Reader consoleReader = null;
    if (!quite && this.config.getLoadType() == LoadType.TIME && this.config.getLoadQuantity() == -1) {
        consoleReader = ConsoleView.openConsoleReader();
    }
    transactionDelayInMs = this.config.getTransactionDelayInMs();
    dmlsPerTransaction = this.config.getDmlsPerTransaction();
    scheduler = Executors.newSingleThreadScheduledExecutor();
    runningTime = Stopwatch.createUnstarted();
    logStatusTime = Stopwatch.createStarted();
    this.quite = quite;
    view = new ConsoleView(this, consoleReader, quite);
    view.logMsg(NAME_VERSION);
    view.logMsg(entityFactory.getConnectionInfo());
    log.info("{}", BuildInfo.getInstance());
}
From source file:org.apache.gobblin.source.extractor.extract.kafka.KafkaExtractor.java
public KafkaExtractor(WorkUnitState state) {
    super(state);
    this.workUnitState = state;
    this.topicName = KafkaUtils.getTopicName(state);
    this.partitions = KafkaUtils.getPartitions(state);
    this.lowWatermark = state.getWorkunit().getLowWatermark(MultiLongWatermark.class);
    this.highWatermark = state.getWorkunit().getExpectedHighWatermark(MultiLongWatermark.class);
    this.nextWatermark = new MultiLongWatermark(this.lowWatermark);

    this.kafkaConsumerClientResolver = new ClassAliasResolver<>(GobblinKafkaConsumerClientFactory.class);
    try {
        this.kafkaConsumerClient = this.closer.register(this.kafkaConsumerClientResolver
                .resolveClass(state.getProp(KafkaSource.GOBBLIN_KAFKA_CONSUMER_CLIENT_FACTORY_CLASS,
                        KafkaSource.DEFAULT_GOBBLIN_KAFKA_CONSUMER_CLIENT_FACTORY_CLASS))
                .newInstance().create(ConfigUtils.propertiesToConfig(state.getProperties())));
    } catch (InstantiationException | IllegalAccessException | ClassNotFoundException e) {
        throw new RuntimeException(e);
    }

    this.stopwatch = Stopwatch.createUnstarted();

    this.decodingErrorCount = Maps.newHashMap();
    this.avgMillisPerRecord = Maps.newHashMapWithExpectedSize(this.partitions.size());
    this.avgRecordSizes = Maps.newHashMapWithExpectedSize(this.partitions.size());
    this.elapsedTime = Maps.newHashMapWithExpectedSize(this.partitions.size());
    this.processedRecordCount = Maps.newHashMapWithExpectedSize(this.partitions.size());
    this.decodeRecordTime = Maps.newHashMapWithExpectedSize(this.partitions.size());
    this.fetchMessageBufferTime = Maps.newHashMapWithExpectedSize(this.partitions.size());
    this.readRecordTime = Maps.newHashMapWithExpectedSize(this.partitions.size());
    this.errorPartitions = Sets.newHashSet();

    // The actual high watermark starts with the low watermark
    this.workUnitState.setActualHighWatermark(this.lowWatermark);
}
From source file:com.cinchapi.concourse.importer.cli.ImportCli.java
@Override
protected void doTask() {
    final ImportOptions opts = (ImportOptions) options;
    final Set<Long> records;
    final Constructor<? extends Importer> constructor = getConstructor(opts.type);
    if (opts.data == null) { // Import data from stdin
        Importer importer = Reflection.newInstance(constructor, concourse);
        if (!opts.dynamic.isEmpty()) {
            importer.setParams(options.dynamic);
        }
        if (importer instanceof Headered && !opts.header.isEmpty()) {
            ((Headered) importer).parseHeader(opts.header);
        }
        try {
            ConsoleReader reader = new ConsoleReader();
            String line;
            records = Sets.newLinkedHashSet();
            Runtime.getRuntime().addShutdownHook(new Thread(new Runnable() {
                // Interactive import is ended when user presses CTRL + C,
                // so we need this shutdown hook to ensure that they get
                // feedback about the import before the JVM dies.
                @Override
                public void run() {
                    if (options.verbose) {
                        System.out.println(records);
                    }
                    System.out.println(Strings.format("Imported data into {} records", records.size()));
                }
            }));
            try {
                final AtomicBoolean lock = new AtomicBoolean(false);
                new Thread(new Runnable() {
                    // If there is no input in 100ms, assume that the
                    // session is interactive (i.e. not piped) and
                    // display a prompt
                    @Override
                    public void run() {
                        try {
                            Thread.sleep(100);
                            if (lock.compareAndSet(false, true)) {
                                System.out.println("Importing from stdin. Press " + "CTRL + C when finished");
                            }
                        } catch (InterruptedException e) {
                        }
                    }
                }).start();
                while ((line = reader.readLine()) != null) {
                    try {
                        lock.set(true);
                        records.addAll(importer.importString(line));
                    } catch (Exception e) {
                        System.err.println(e);
                    }
                }
            } catch (IOException e) {
                throw Throwables.propagate(e);
            }
        } catch (IOException e) {
            throw Throwables.propagate(e);
        } finally {
            try {
                TerminalFactory.get().restore();
            } catch (Exception e) {
                throw Throwables.propagate(e);
            }
        }
    } else {
        String path = FileOps.expandPath(opts.data, getLaunchDirectory());
        Collection<String> files = FileOps.isDirectory(path) ? scan(Paths.get(path)) : ImmutableList.of(path);
        Stopwatch watch = Stopwatch.createUnstarted();
        if (files.size() > 1) {
            records = Sets.newConcurrentHashSet();
            final Queue<String> filesQueue = (Queue<String>) files;
            List<Runnable> runnables = Lists.newArrayListWithCapacity(opts.numThreads);
            // Create just enough Runnables with instantiated Importers in
            // advance. Each of those Runnables will work until #filesQueue
            // is exhausted.
            opts.numThreads = Math.min(opts.numThreads, files.size());
            for (int i = 0; i < opts.numThreads; ++i) {
                final Importer importer0 = Reflection.newInstance(constructor,
                        i == 0 ? concourse
                                : Concourse.connect(opts.host, opts.port, opts.username, opts.password,
                                        opts.environment));
                if (!opts.dynamic.isEmpty()) {
                    importer0.setParams(opts.dynamic);
                }
                if (importer0 instanceof Headered && !opts.header.isEmpty()) {
                    ((Headered) importer0).parseHeader(opts.header);
                }
                runnables.add(new Runnable() {
                    private final Importer importer = importer0;

                    @Override
                    public void run() {
                        String file;
                        while ((file = filesQueue.poll()) != null) {
                            records.addAll(importer.importFile(file));
                        }
                    }
                });
            }
            ExecutorService executor = Executors.newFixedThreadPool(runnables.size());
            System.out.println("Starting import...");
            watch.start();
            for (Runnable runnable : runnables) {
                executor.execute(runnable);
            }
            executor.shutdown();
            try {
                if (!executor.awaitTermination(1, TimeUnit.MINUTES)) {
                    while (!executor.isTerminated()) {
                        // block until all tasks are completed and provide
                        // some feedback to the user
                        System.out.print('.');
                    }
                }
            } catch (InterruptedException e) {
                throw Throwables.propagate(e);
            }
        } else {
            Importer importer = Reflection.newInstance(constructor, concourse);
            if (!opts.dynamic.isEmpty()) {
                importer.setParams(opts.dynamic);
            }
            if (importer instanceof Headered && !opts.header.isEmpty()) {
                ((Headered) importer).parseHeader(opts.header);
            }
            System.out.println("Starting import...");
            watch.start();
            records = importer.importFile(files.iterator().next());
        }
        watch.stop();
        long elapsed = watch.elapsed(TimeUnit.MILLISECONDS);
        double seconds = elapsed / 1000.0;
        if (options.verbose) {
            System.out.println(records);
        }
        System.out.println(MessageFormat.format("Imported data " + "into {0} records in {1} seconds",
                records.size(), seconds));
    }
}
From source file:fr.inria.eventcloud.overlay.can.StaticLoadBalancingTestBuilder.java
public Test build() {
    return new Test() {

        private static final String CENTROID_SHORT_RDF_TERM_PREFIX = "http://aaa";

        private static final String CENTROID_LONG_RDF_TERM_PREFIX = "http://zzz";

        @Override
        protected void _execute() throws EventCloudIdNotManaged, NetworkAlreadyJoinedException,
                FileNotFoundException, PeerNotActivatedException {
            if (StaticLoadBalancingTestBuilder.this.enableLoadBalancing) {
                EventCloudProperties.STATIC_LOAD_BALANCING.setValue(true);
            }
            EventCloudProperties.RECORD_STATS_MISC_DATASTORE.setValue(true);
            if (StaticLoadBalancingTestBuilder.this.statsRecorderClass != null) {
                EventCloudProperties.STATS_RECORDER_CLASS
                        .setValue(StaticLoadBalancingTestBuilder.this.statsRecorderClass);
            }
            this.eventCloudId = this.deployer.newEventCloud(1, 1);
            SemanticPeer firstPeer = this.deployer.getRandomSemanticPeer(this.eventCloudId);
            final PutGetApi putgetProxy = ProxyFactory.newPutGetProxy(
                    this.deployer.getEventCloudsRegistryUrl(), this.eventCloudId);
            final Stopwatch stopwatch = Stopwatch.createUnstarted();
            Node graph = null;
            if (StaticLoadBalancingTestBuilder.this.trigResource == null) {
                if (this.simulateCompoundEvents()) {
                    graph = NodeGenerator.randomUri(StaticLoadBalancingTestBuilder.this.rdfTermSize);
                }
                int tmpNbQuadsToInsert = StaticLoadBalancingTestBuilder.this.nbQuadsToInsert;
                if (this.isCentroidStatsRecorderUsed()
                        && StaticLoadBalancingTestBuilder.this.nbPeersToInject > 0) {
                    tmpNbQuadsToInsert = StaticLoadBalancingTestBuilder.this.nbQuadsToInsert / 3 * 2;
                }
                for (int i = 0; i < tmpNbQuadsToInsert; i++) {
                    Quadruple quad = null;
                    if (this.simulateCompoundEvents()
                            && i % StaticLoadBalancingTestBuilder.this.nbQuadsPerCompoundEvent == 0) {
                        if (this.isCentroidStatsRecorderUsed()
                                && StaticLoadBalancingTestBuilder.this.nbPeersToInject > 1) {
                            graph = NodeGenerator.randomUri(CENTROID_SHORT_RDF_TERM_PREFIX,
                                    StaticLoadBalancingTestBuilder.this.rdfTermSize);
                        } else {
                            graph = NodeGenerator.randomUri(StaticLoadBalancingTestBuilder.this.rdfTermSize);
                        }
                    }
                    quad = this.buildQuadruple(graph, StaticLoadBalancingTestBuilder.this.rdfTermSize);
                    stopwatch.start();
                    putgetProxy.add(quad);
                    stopwatch.stop();
                }
            } else {
                List<Quadruple> quads = StaticLoadBalancingTestBuilder.this
                        .loadEvents(StaticLoadBalancingTestBuilder.this.trigResource);
                StaticLoadBalancingTestBuilder.this.nbQuadsToInsert = quads.size();
                LOG.info("{} quadruples loaded from {}", quads.size(),
                        StaticLoadBalancingTestBuilder.this.trigResource);
                for (Quadruple q : quads) {
                    stopwatch.start();
                    putgetProxy.add(q);
                    stopwatch.stop();
                }
            }
            if (StaticLoadBalancingTestBuilder.this.insertSkewedData && this.isCentroidStatsRecorderUsed()
                    && StaticLoadBalancingTestBuilder.this.nbPeersToInject > 0) {
                // add 1/3 of the data which are 10 times longer
                int longRdfTermSize = StaticLoadBalancingTestBuilder.this.rdfTermSize * 10;
                if (this.simulateCompoundEvents()) {
                    graph = NodeGenerator.randomUri(CENTROID_LONG_RDF_TERM_PREFIX, longRdfTermSize);
                }
                for (int i = 0; i < StaticLoadBalancingTestBuilder.this.nbQuadsToInsert / 3; i++) {
                    Quadruple quad = null;
                    if (this.simulateCompoundEvents()
                            && i % StaticLoadBalancingTestBuilder.this.nbQuadsPerCompoundEvent == 0) {
                        graph = NodeGenerator.randomUri(CENTROID_LONG_RDF_TERM_PREFIX + longRdfTermSize);
                    }
                    quad = this.buildQuadruple(graph, longRdfTermSize);
                    stopwatch.start();
                    putgetProxy.add(quad);
                    stopwatch.stop();
                }
            }
            LOG.info("It took {} to insert {} quadruples", stopwatch.toString(),
                    StaticLoadBalancingTestBuilder.this.nbQuadsToInsert);
            this.executionTime = stopwatch.elapsed(TimeUnit.MILLISECONDS);
            if (StaticLoadBalancingTestBuilder.this.nbPeersToInject > 0) {
                LOG.info("Before join, first peer dump:\n" + firstPeer.dump());
                for (int i = 0; i < StaticLoadBalancingTestBuilder.this.nbPeersToInject; i++) {
                    long maxNumQuads = -1;
                    Peer electedPeer = null;
                    List<Peer> peers = this.deployer.getRandomSemanticTracker(this.eventCloudId).getPeers();
                    // we select the peer which has the higher number of
                    // quadruples in the misc datastore in order to
                    // perform the next split
                    for (Peer p : peers) {
                        GetStatsRecordeResponseOperation response = (GetStatsRecordeResponseOperation) PAFuture
                                .getFutureValue(p.receive(new GetStatsRecorderOperation()));
                        if (response.getStatsRecorder().getNbQuadruples() > maxNumQuads) {
                            maxNumQuads = response.getStatsRecorder().getNbQuadruples();
                            electedPeer = p;
                        }
                    }
                    Peer newPeer = SemanticFactory.newSemanticPeer(new SemanticOverlayProvider(true));
                    newPeer.join(electedPeer);
                    this.deployer.getRandomSemanticTracker(this.eventCloudId).storePeer(newPeer);
                    LOG.info("Join operation " + (i + 1));
                }
                LOG.info("After injections, other peers dump:\n");
                for (Peer p : this.deployer.getRandomSemanticTracker(this.eventCloudId).getPeers()) {
                    LOG.info(p.dump());
                }
                if (StaticLoadBalancingTestBuilder.this.nbLookupsAfterJoinOperations > 0) {
                    for (int i = 0; i < StaticLoadBalancingTestBuilder.this.nbLookupsAfterJoinOperations; i++) {
                        // long size = putgetProxy.find(QuadruplePattern.ANY).size();
                        // Assert.assertEquals(
                        //         StaticLoadBalancingTestBuilder.this.nbQuadsToInsert,
                        //         size);
                    }
                }
            } else {
                LOG.info("Peer dump:\n" + firstPeer.dump());
            }
            ComponentUtils.terminateComponent(putgetProxy);
        }

        private Quadruple buildQuadruple(Node graph, int rdfTermSize) {
            if (this.simulateCompoundEvents()) {
                if (this.isCentroidStatsRecorderUsed()
                        && StaticLoadBalancingTestBuilder.this.nbPeersToInject > 1) {
                    if (rdfTermSize > StaticLoadBalancingTestBuilder.this.rdfTermSize) {
                        return QuadrupleGenerator.randomWithoutLiteral(graph, CENTROID_LONG_RDF_TERM_PREFIX,
                                rdfTermSize);
                    } else {
                        return QuadrupleGenerator.randomWithoutLiteral(graph, CENTROID_SHORT_RDF_TERM_PREFIX,
                                rdfTermSize);
                    }
                } else {
                    if (graph == null) {
                        return QuadrupleGenerator.randomWithoutLiteral(rdfTermSize);
                    } else {
                        return QuadrupleGenerator.randomWithoutLiteral(graph, rdfTermSize);
                    }
                }
            } else {
                if (this.isCentroidStatsRecorderUsed()
                        && StaticLoadBalancingTestBuilder.this.nbPeersToInject > 1) {
                    if (rdfTermSize > StaticLoadBalancingTestBuilder.this.rdfTermSize) {
                        return QuadrupleGenerator.randomWithoutLiteral(CENTROID_LONG_RDF_TERM_PREFIX,
                                rdfTermSize);
                    } else {
                        return QuadrupleGenerator.randomWithoutLiteral(CENTROID_SHORT_RDF_TERM_PREFIX,
                                rdfTermSize);
                    }
                } else {
                    if (graph == null) {
                        return QuadrupleGenerator.randomWithoutLiteral(rdfTermSize);
                    } else {
                        return QuadrupleGenerator.randomWithoutLiteral(graph, rdfTermSize);
                    }
                }
            }
        }

        private boolean isCentroidStatsRecorderUsed() {
            return (StaticLoadBalancingTestBuilder.this.statsRecorderClass != null)
                    && (StaticLoadBalancingTestBuilder.this.statsRecorderClass
                            .isAssignableFrom(CentroidStatsRecorder.class));
        }

        private boolean simulateCompoundEvents() {
            return StaticLoadBalancingTestBuilder.this.nbQuadsPerCompoundEvent != -1;
        }
    };
}
From source file:io.druid.server.coordinator.DruidCoordinatorBalancerProfiler.java
public void profileRun() {
    Stopwatch watch = Stopwatch.createUnstarted();
    LoadQueuePeonTester fromPeon = new LoadQueuePeonTester();
    LoadQueuePeonTester toPeon = new LoadQueuePeonTester();

    EasyMock.expect(druidServer1.getName()).andReturn("from").atLeastOnce();
    EasyMock.expect(druidServer1.getCurrSize()).andReturn(30L).atLeastOnce();
    EasyMock.expect(druidServer1.getMaxSize()).andReturn(100L).atLeastOnce();
    EasyMock.expect(druidServer1.getSegments()).andReturn(segments).anyTimes();
    EasyMock.expect(druidServer1.getSegment(EasyMock.<String>anyObject())).andReturn(null).anyTimes();
    EasyMock.replay(druidServer1);

    EasyMock.expect(druidServer2.getName()).andReturn("to").atLeastOnce();
    EasyMock.expect(druidServer2.getTier()).andReturn("normal").anyTimes();
    EasyMock.expect(druidServer2.getCurrSize()).andReturn(0L).atLeastOnce();
    EasyMock.expect(druidServer2.getMaxSize()).andReturn(100L).atLeastOnce();
    EasyMock.expect(druidServer2.getSegments()).andReturn(new HashMap<String, DataSegment>()).anyTimes();
    EasyMock.expect(druidServer2.getSegment(EasyMock.<String>anyObject())).andReturn(null).anyTimes();
    EasyMock.replay(druidServer2);

    coordinator.moveSegment(EasyMock.<ImmutableDruidServer>anyObject(),
            EasyMock.<ImmutableDruidServer>anyObject(), EasyMock.<String>anyObject(),
            EasyMock.<LoadPeonCallback>anyObject());
    EasyMock.expectLastCall().anyTimes();
    EasyMock.replay(coordinator);

    DruidCoordinatorRuntimeParams params = DruidCoordinatorRuntimeParams.newBuilder()
            .withDruidCluster(new DruidCluster(ImmutableMap.<String, MinMaxPriorityQueue<ServerHolder>>of(
                    "normal",
                    MinMaxPriorityQueue.orderedBy(DruidCoordinatorBalancerTester.percentUsedComparator)
                            .create(Arrays.asList(new ServerHolder(druidServer1, fromPeon),
                                    new ServerHolder(druidServer2, toPeon))))))
            .withLoadManagementPeons(ImmutableMap.<String, LoadQueuePeon>of("from", fromPeon, "to", toPeon))
            .withAvailableSegments(segments.values())
            .withDynamicConfigs(new CoordinatorDynamicConfig.Builder()
                    .withMaxSegmentsToMove(MAX_SEGMENTS_TO_MOVE).build())
            .withBalancerReferenceTimestamp(new DateTime("2013-01-01")).build();
    DruidCoordinatorBalancerTester tester = new DruidCoordinatorBalancerTester(coordinator);
    watch.start();
    DruidCoordinatorRuntimeParams balanceParams = tester.run(params);
    System.out.println(watch.stop());
}
From source file:org.opendaylight.controller.clustering.it.provider.CarProvider.java
@Override
public Future<RpcResult<Void>> stressTest(StressTestInput input) {
    final int inputRate;
    final long inputCount;

    // If rate is not provided, or given as zero, then just return.
    if ((input.getRate() == null) || (input.getRate() == 0)) {
        log.info("Exiting stress test as no rate is given.");
        return Futures.immediateFuture(
                RpcResultBuilder.<Void>failed().withError(ErrorType.PROTOCOL, "invalid rate").build());
    } else {
        inputRate = input.getRate();
    }
    if (input.getCount() != null) {
        inputCount = input.getCount();
    } else {
        inputCount = 0;
    }
    log.info("Stress test starting : rate: {} count: {}", inputRate, inputCount);

    stopThread();
    // clear counters
    succcessCounter.set(0);
    failureCounter.set(0);

    WriteTransaction tx = dataProvider.newWriteOnlyTransaction();
    InstanceIdentifier<Cars> carsId = InstanceIdentifier.<Cars>builder(Cars.class).build();
    tx.merge(LogicalDatastoreType.CONFIGURATION, carsId, new CarsBuilder().build());
    try {
        tx.submit().checkedGet(5, TimeUnit.SECONDS);
    } catch (TransactionCommitFailedException | TimeoutException e) {
        log.error("Put Cars failed", e);
        return Futures.immediateFuture(RpcResultBuilder.<Void>success().build());
    }

    stopThread = false;
    final long sleep = TimeUnit.NANOSECONDS.convert(1000, TimeUnit.MILLISECONDS) / inputRate;
    final Stopwatch sw = Stopwatch.createUnstarted();
    testThread = new Thread() {
        @Override
        public void run() {
            sw.start();
            AtomicLong count = new AtomicLong();
            while (!stopThread) {
                long id = count.incrementAndGet();
                WriteTransaction tx = dataProvider.newWriteOnlyTransaction();
                CarEntry car = new CarEntryBuilder().setId(new CarId("car" + id)).build();
                tx.put(LogicalDatastoreType.CONFIGURATION, InstanceIdentifier.<Cars>builder(Cars.class)
                        .child(CarEntry.class, car.getKey()).build(), car);
                CheckedFuture<Void, TransactionCommitFailedException> future = tx.submit();
                Futures.addCallback(future, new FutureCallback<Void>() {
                    @Override
                    public void onSuccess(final Void result) {
                        // Transaction succeeded
                        succcessCounter.getAndIncrement();
                    }

                    @Override
                    public void onFailure(final Throwable t) {
                        // Transaction failed
                        failureCounter.getAndIncrement();
                        LOG.error("Put Cars failed", t);
                    }
                });
                try {
                    TimeUnit.NANOSECONDS.sleep(sleep);
                } catch (InterruptedException e) {
                    break;
                }
                if ((count.get() % 1000) == 0) {
                    log.info("Cars created {}, time: {}", count.get(), sw.elapsed(TimeUnit.SECONDS));
                }
                // Check if a count is specified in input and we have created that many cars.
                if ((inputCount != 0) && (count.get() >= inputCount)) {
                    stopThread = true;
                }
            }
            log.info("Stress test thread stopping after creating {} cars.", count.get());
        }
    };
    testThread.start();
    return Futures.immediateFuture(RpcResultBuilder.<Void>success().build());
}
From source file:org.apache.hive.ptest.execution.HostExecutor.java
/**
 * Executes parallel tests until the parallel work queue is empty, then
 * executes the isolated tests on the host. During each phase, if an
 * AbortDroneException is thrown the drone is removed, possibly
 * leaving this host with zero functioning drones. If all drones
 * are removed the host will be replaced before the next run.
 */
private void executeTests(final BlockingQueue<TestBatch> parallelWorkQueue,
        final BlockingQueue<TestBatch> isolatedWorkQueue, final Set<TestBatch> failedTestResults)
        throws Exception {
    if (mShutdown) {
        mLogger.warn("Shutting down host " + mHost.getName());
        return;
    }
    mLogger.info("Starting parallel execution on " + mHost.getName());
    List<ListenableFuture<Void>> droneResults = Lists.newArrayList();
    for (final Drone drone : ImmutableList.copyOf(mDrones)) {
        droneResults.add(mExecutor.submit(new Callable<Void>() {
            @Override
            public Void call() throws Exception {
                TestBatch batch = null;
                Stopwatch sw = Stopwatch.createUnstarted();
                try {
                    do {
                        batch = parallelWorkQueue.poll(mNumPollSeconds, TimeUnit.SECONDS);
                        if (mShutdown) {
                            mLogger.warn("Shutting down host " + mHost.getName());
                            return null;
                        }
                        if (batch != null) {
                            numParallelBatchesProcessed++;
                            sw.reset().start();
                            try {
                                if (!executeTestBatch(drone, batch, failedTestResults)) {
                                    failedTestResults.add(batch);
                                }
                            } finally {
                                sw.stop();
                                mLogger.info(
                                        "Finished processing parallel batch [{}] on host {}. ElapsedTime(ms)={}",
                                        new Object[] { batch.getName(), getHost().toShortString(),
                                                sw.elapsed(TimeUnit.MILLISECONDS) });
                            }
                        }
                    } while (!mShutdown && !parallelWorkQueue.isEmpty());
                } catch (AbortDroneException ex) {
                    mDrones.remove(drone); // return value not checked due to concurrent access
                    mLogger.error("Aborting drone during parallel execution", ex);
                    if (batch != null) {
                        Preconditions.checkState(parallelWorkQueue.add(batch),
                                "Could not add batch to parallel queue " + batch);
                    }
                }
                return null;
            }
        }));
    }
    if (mShutdown) {
        mLogger.warn("Shutting down host " + mHost.getName());
        return;
    }
    Futures.allAsList(droneResults).get();
    mLogger.info("Starting isolated execution on " + mHost.getName());
    for (Drone drone : ImmutableList.copyOf(mDrones)) {
        TestBatch batch = null;
        Stopwatch sw = Stopwatch.createUnstarted();
        try {
            do {
                batch = isolatedWorkQueue.poll(mNumPollSeconds, TimeUnit.SECONDS);
                if (batch != null) {
                    numIsolatedBatchesProcessed++;
                    sw.reset().start();
                    try {
                        if (!executeTestBatch(drone, batch, failedTestResults)) {
                            failedTestResults.add(batch);
                        }
                    } finally {
                        sw.stop();
                        mLogger.info("Finished processing isolated batch [{}] on host {}. ElapsedTime(ms)={}",
                                new Object[] { batch.getName(), getHost().toShortString(),
                                        sw.elapsed(TimeUnit.MILLISECONDS) });
                    }
                }
            } while (!mShutdown && !isolatedWorkQueue.isEmpty());
        } catch (AbortDroneException ex) {
            mDrones.remove(drone); // return value not checked due to concurrent access
            mLogger.error("Aborting drone during isolated execution", ex);
            if (batch != null) {
                Preconditions.checkState(isolatedWorkQueue.add(batch),
                        "Could not add batch to isolated queue " + batch);
            }
        }
    }
}
From source file:org.apache.druid.server.coordinator.DruidCoordinatorBalancerProfiler.java
public void profileRun() {
    Stopwatch watch = Stopwatch.createUnstarted();
    LoadQueuePeonTester fromPeon = new LoadQueuePeonTester();
    LoadQueuePeonTester toPeon = new LoadQueuePeonTester();

    EasyMock.expect(druidServer1.getName()).andReturn("from").atLeastOnce();
    EasyMock.expect(druidServer1.getCurrSize()).andReturn(30L).atLeastOnce();
    EasyMock.expect(druidServer1.getMaxSize()).andReturn(100L).atLeastOnce();
    EasyMock.expect(druidServer1.getSegments()).andReturn(segments).anyTimes();
    EasyMock.expect(druidServer1.getSegment(EasyMock.anyObject())).andReturn(null).anyTimes();
    EasyMock.replay(druidServer1);

    EasyMock.expect(druidServer2.getName()).andReturn("to").atLeastOnce();
    EasyMock.expect(druidServer2.getTier()).andReturn("normal").anyTimes();
    EasyMock.expect(druidServer2.getCurrSize()).andReturn(0L).atLeastOnce();
    EasyMock.expect(druidServer2.getMaxSize()).andReturn(100L).atLeastOnce();
    EasyMock.expect(druidServer2.getSegments()).andReturn(new HashMap<String, DataSegment>()).anyTimes();
    EasyMock.expect(druidServer2.getSegment(EasyMock.anyObject())).andReturn(null).anyTimes();
    EasyMock.replay(druidServer2);

    coordinator.moveSegment(EasyMock.anyObject(), EasyMock.anyObject(), EasyMock.anyObject(),
            EasyMock.anyObject());
    EasyMock.expectLastCall().anyTimes();
    EasyMock.replay(coordinator);

    DruidCoordinatorRuntimeParams params = DruidCoordinatorRuntimeParams.newBuilder()
            .withDruidCluster(new DruidCluster(null, ImmutableMap.of("normal",
                    Stream.of(new ServerHolder(druidServer1, fromPeon),
                            new ServerHolder(druidServer2, toPeon))
                            .collect(Collectors.toCollection(
                                    () -> new TreeSet<>(DruidCoordinatorBalancer.percentUsedComparator))))))
            .withLoadManagementPeons(ImmutableMap.of("from", fromPeon, "to", toPeon))
            .withAvailableSegments(segments.values())
            .withDynamicConfigs(
                    CoordinatorDynamicConfig.builder().withMaxSegmentsToMove(MAX_SEGMENTS_TO_MOVE).build())
            .withBalancerReferenceTimestamp(DateTimes.of("2013-01-01")).build();
    DruidCoordinatorBalancerTester tester = new DruidCoordinatorBalancerTester(coordinator);
    watch.start();
    DruidCoordinatorRuntimeParams balanceParams = tester.run(params);
    System.out.println(watch.stop());
}
From source file:org.opendaylight.controller.config.persist.impl.ConfigPusherImpl.java
private synchronized boolean pushConfigWithConflictingVersionRetries(ConfigSnapshotHolder configSnapshotHolder)
        throws ConfigSnapshotFailureException {
    ConflictingVersionException lastException;
    Stopwatch stopwatch = Stopwatch.createUnstarted();
    do {
        // TODO wait until all expected modules are in yangStoreService; do we even need to,
        // with yangStoreService instead of netconfOperationService?
        String idForReporting = configSnapshotHolder.toString();
        SortedSet<String> expectedCapabilities = checkNotNull(configSnapshotHolder.getCapabilities(),
                "Expected capabilities must not be null - %s, check %s", idForReporting,
                configSnapshotHolder.getClass().getName());

        // wait max time for required capabilities to appear
        waitForCapabilities(expectedCapabilities, idForReporting);
        try {
            if (!stopwatch.isRunning()) {
                stopwatch.start();
            }
            return pushConfig(configSnapshotHolder);
        } catch (ConflictingVersionException e) {
            lastException = e;
            LOG.info("Conflicting version detected, will retry after timeout");
            sleep();
        }
    } while (stopwatch.elapsed(TimeUnit.MILLISECONDS) < conflictingVersionTimeoutMillis);
    throw new IllegalStateException("Max wait for conflicting version stabilization timeout after "
            + stopwatch.elapsed(TimeUnit.MILLISECONDS) + " ms", lastException);
}
From source file:com.google.gcloud.RetryHelper.java
public static <V> V runWithRetries(Callable<V> callable, RetryParams params,
        ExceptionHandler exceptionHandler) throws RetryHelperException {
    return runWithRetries(callable, params, exceptionHandler, Stopwatch.createUnstarted());
}