List of usage examples for java.util.concurrent.atomic.AtomicInteger.incrementAndGet()
public final int incrementAndGet()
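incrementAndGet() atomically adds one to the current value and returns the updated value, which makes it useful wherever several threads need unique sequence numbers or an exact shared count without locking. Before the real-world usages below, here is a minimal self-contained sketch; the class name and iteration counts are illustrative only, not taken from any of the sources that follow:

import java.util.concurrent.atomic.AtomicInteger;

public class IncrementAndGetDemo {
    public static void main(String[] args) throws InterruptedException {
        final AtomicInteger counter = new AtomicInteger(0);
        Runnable task = () -> {
            for (int i = 0; i < 1000; i++) {
                counter.incrementAndGet(); // atomic: no lost updates under contention
            }
        };
        Thread t1 = new Thread(task);
        Thread t2 = new Thread(task);
        t1.start();
        t2.start();
        t1.join();
        t2.join();
        // Always prints 2000; a plain int++ here could lose increments
        System.out.println("Final count: " + counter.get());
    }
}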
From source file: org.alfresco.bm.event.AbstractResultService.java
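The reporting method below keeps a LinkedList of AtomicInteger failure counts per event name; when an event record reports a failure, incrementAndGet() bumps every counter currently tracking that event.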
/** {@inheritDoc} */
@Override
public void getResults(ResultHandler handler, long startTime, long windowSize, long reportPeriod,
        boolean chartOnly) {
    /*
     * Keep track of all events' statistics.
     * It is possible to report more frequently than the window size.
     * For each report period in the reporting window, the statistics for the events need to be maintained.
     */
    if (handler == null) {
        throw new IllegalArgumentException("A result handler must be supplied.");
    }
    if (windowSize <= 0L) {
        throw new IllegalArgumentException("'windowSize' must be a non-zero, positive number.");
    }
    if (reportPeriod <= 0L) {
        throw new IllegalArgumentException("'reportPeriod' must be a non-zero, positive number.");
    }
    if (reportPeriod > windowSize) {
        throw new IllegalArgumentException("'reportPeriod' cannot be more than the 'windowSize'.");
    }
    if (windowSize % reportPeriod != 0L) {
        throw new IllegalArgumentException("'windowSize' must be a multiple of 'reportPeriod'.");
    }

    // We have to keep statistics for each reporting period
    int windowMultiple = (int) (windowSize / reportPeriod);

    // Build stats for reporting back.
    // Each LinkedList will have 'windowMultiple' entries.
    // The newest statistics will be the last in the linked list; results will be reported
    // from the first entry each time.
    Map<String, LinkedList<DescriptiveStatistics>> statsByEventName =
            new HashMap<String, LinkedList<DescriptiveStatistics>>(13);
    Map<String, LinkedList<AtomicInteger>> failuresByEventName =
            new HashMap<String, LinkedList<AtomicInteger>>(13);

    // Our event queries use separate windows
    EventRecord firstResult = getFirstResult();
    if (firstResult == null) {
        // There is nothing
        return;
    }
    long firstResultStartTime = firstResult.getStartTime();
    EventRecord lastResult = getLastResult();
    long lastResultStartTime = lastResult.getStartTime();

    long queryWindowStartTime = Math.max(firstResultStartTime, startTime); // The start time is inclusive
    long queryWindowSize = lastResult.getStartTime() - firstResult.getStartTime();
    if (queryWindowSize < 60000L) {
        queryWindowSize = 60000L;                       // Query window is at least a minute
    } else if (queryWindowSize > (60000L * 60L)) {
        queryWindowSize = 60000L * 60L;                 // Query window is at most an hour
    }
    long queryWindowEndTime = queryWindowStartTime + queryWindowSize;

    // Rebase the aggregation window to encompass the first event
    long currentWindowEndTime =
            (long) Math.floor((firstResultStartTime + reportPeriod) / reportPeriod) * reportPeriod;
    long currentWindowStartTime = currentWindowEndTime - windowSize;

    // Iterate over the results
    int skip = 0;
    int limit = 10000;
    boolean stop = false;
    boolean unreportedResults = false;
    breakStop: while (!stop) {
        List<EventRecord> results = getResults(queryWindowStartTime, queryWindowEndTime, chartOnly, skip, limit);
        if (results.size() == 0) {
            if (queryWindowEndTime > lastResultStartTime) {
                // The query window has included the last event, so we have extracted all results
                if (unreportedResults) {
                    // The query window ends in the future, so we are done
                    reportAndCycleStats(statsByEventName, failuresByEventName, currentWindowStartTime,
                            currentWindowEndTime, windowMultiple, handler);
                    unreportedResults = false;
                }
                stop = true;
            } else {
                // Move the query window up
                queryWindowStartTime = queryWindowEndTime;
                queryWindowEndTime += queryWindowSize;
                // Reset the skip count as we are in a new query window
                skip = 0;
            }
            // We continue
            continue;
        }
        // Process each result found in the query window
        for (EventRecord eventRecord : results) {
            String eventRecordName = eventRecord.getEvent().getName();
            long eventRecordStartTime = eventRecord.getStartTime();
            long eventRecordTime = eventRecord.getTime();
            boolean eventRecordSuccess = eventRecord.isSuccess();
            // If the current event is past the reporting period, then report
            if (eventRecordStartTime >= currentWindowEndTime) {
                // Report the current stats
                stop = reportAndCycleStats(statsByEventName, failuresByEventName, currentWindowStartTime,
                        currentWindowEndTime, windowMultiple, handler);
                unreportedResults = false;
                // Shift the window up by one report period
                currentWindowStartTime += reportPeriod;
                currentWindowEndTime += reportPeriod;
                // Check for stop
                if (stop) {
                    break breakStop;
                }
            }
            // Increase the skip with each window result
            skip++;
            // Ignore results we don't wish to chart
            if (chartOnly && !eventRecord.isChart()) {
                continue;
            }
            // We have to report this result at some point
            unreportedResults = true;
            // Get the linked list of stats for the event
            LinkedList<DescriptiveStatistics> eventStatsLL = statsByEventName.get(eventRecordName);
            if (eventStatsLL == null) {
                // Create a LL for the event
                eventStatsLL = new LinkedList<DescriptiveStatistics>();
                statsByEventName.put(eventRecordName, eventStatsLL);
                // We need at least one entry in order to record stats
                eventStatsLL.add(new DescriptiveStatistics());
            }
            // Write the current event to all the stats for the event
            for (DescriptiveStatistics eventStats : eventStatsLL) {
                eventStats.addValue(eventRecordTime);
            }
            // Get the linked list of failure counts for the event
            LinkedList<AtomicInteger> eventFailuresLL = failuresByEventName.get(eventRecordName);
            if (eventFailuresLL == null) {
                // Create a LL for the event
                eventFailuresLL = new LinkedList<AtomicInteger>();
                failuresByEventName.put(eventRecordName, eventFailuresLL);
                // Need one entry to record failures
                eventFailuresLL.add(new AtomicInteger(0));
            }
            // Write any failures to all counts for the event
            if (!eventRecordSuccess) {
                for (AtomicInteger eventFailures : eventFailuresLL) {
                    eventFailures.incrementAndGet();
                }
            }
        }
    }
}
From source file: org.apache.hadoop.gateway.hdfs.dispatch.WebHdfsHaDispatch.java
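In this HA dispatch, incrementAndGet() both increments and reads a failover counter stored as a request attribute, so the request is retried only while the count stays within maxFailoverAttempts.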
private void failoverRequest(HttpUriRequest outboundRequest, HttpServletRequest inboundRequest,
        HttpServletResponse outboundResponse, HttpResponse inboundResponse, Exception exception)
        throws IOException {
    LOG.failingOverRequest(outboundRequest.getURI().toString());
    AtomicInteger counter = (AtomicInteger) inboundRequest.getAttribute(FAILOVER_COUNTER_ATTRIBUTE);
    if (counter == null) {
        counter = new AtomicInteger(0);
    }
    inboundRequest.setAttribute(FAILOVER_COUNTER_ATTRIBUTE, counter);
    if (counter.incrementAndGet() <= maxFailoverAttempts) {
        haProvider.markFailedURL(RESOURCE_ROLE, outboundRequest.getURI().toString());
        // null out the target URL so that the rewriters run again
        inboundRequest.setAttribute(AbstractGatewayFilter.TARGET_REQUEST_URL_ATTRIBUTE_NAME, null);
        URI uri = getDispatchUrl(inboundRequest);
        ((HttpRequestBase) outboundRequest).setURI(uri);
        if (failoverSleep > 0) {
            try {
                Thread.sleep(failoverSleep);
            } catch (InterruptedException e) {
                LOG.failoverSleepFailed(RESOURCE_ROLE, e);
            }
        }
        executeRequest(outboundRequest, inboundRequest, outboundResponse);
    } else {
        LOG.maxFailoverAttemptsReached(maxFailoverAttempts, RESOURCE_ROLE);
        if (inboundResponse != null) {
            writeOutboundResponse(outboundRequest, inboundRequest, outboundResponse, inboundResponse);
        } else {
            throw new IOException(exception);
        }
    }
}
From source file: ch.windmobile.server.socialmodel.mogodb.HeavyLoadTest.java
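A load test that posts 50,000 chat messages from a ten-thread pool; incrementAndGet() gives each message a unique sequence number across threads.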
public void testFullChatCycle() throws Exception {
    ServiceLocator locator = new MongoDBServiceLocator().connect(null);
    try {
        final int CNT = 50000;
        final Executor executor = Executors.newFixedThreadPool(10);
        final ChatService chatService = locator.getService(ChatService.class);
        final AtomicInteger counter = new AtomicInteger();
        final CountDownLatch latch = new CountDownLatch(CNT);
        for (int i = 0; i < CNT; i++) {
            executor.execute(new Runnable() {
                @Override
                public void run() {
                    chatService.postMessage("TestRoom", "aUser",
                            "Hello, this is my message " + counter.incrementAndGet(), "");
                    latch.countDown();
                }
            });
        }
        System.out.println("Chat sent, waiting for the end...");
        latch.await(2, TimeUnit.MINUTES);
        Messages ret = chatService.findMessages("TEST", 5);
        System.out.println("result : " + ret);
    } finally {
        locator.disconnect();
    }
}
From source file: com.spectralogic.ds3client.metadata.MetadataAccessImpl_Test.java
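A test that counts invocations of a failure listener: incrementAndGet() records each onFailure callback and the test asserts the handler fired exactly once.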
@Test
public void testMetadataAccessFailureHandlerWindows() {
    Assume.assumeTrue(Platform.isWindows());
    try {
        final ImmutableMap.Builder<String, Path> fileMapper = ImmutableMap.builder();
        final String fileName = "file";
        fileMapper.put(fileName, Paths.get(fileName));
        final AtomicInteger numTimesFailureHandlerCalled = new AtomicInteger(0);
        new MetadataAccessImpl(fileMapper.build(), new FailureEventListener() {
            @Override
            public void onFailure(final FailureEvent failureEvent) {
                numTimesFailureHandlerCalled.incrementAndGet();
                assertEquals(FailureEvent.FailureActivity.RecordingMetadata, failureEvent.doingWhat());
            }
        }, "localhost").getMetadataValue(fileName);
        assertEquals(1, numTimesFailureHandlerCalled.get());
    } catch (final Throwable t) {
        fail("Throwing exceptions from metadata est verbotten.");
    }
}
From source file: cn.edu.zjnu.acm.judge.core.Judger.java
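A ThreadFactory that gives each judge thread a unique name by appending the counter's incremented value.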
@PostConstruct
public void init() {
    final ThreadGroup group = new ThreadGroup("judge group");
    final AtomicInteger counter = new AtomicInteger();
    final ThreadFactory threadFactory =
            runnable -> new Thread(group, runnable, "judge thread " + counter.incrementAndGet());
    executorService = Executors.newFixedThreadPool(Runtime.getRuntime().availableProcessors(), threadFactory);
}
From source file: org.apache.tinkerpop.gremlin.server.GremlinServerSessionIntegrateTest.java
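An integration test that verifies streamed results arrive in order: the counter ticks once per result and the assertion compares it against the doubled value.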
@Test
public void shouldCloseSessionOnceOnRequest() throws Exception {
    final Cluster cluster = Cluster.build().create();
    final Client client = cluster.connect(name.getMethodName());

    final ResultSet results1 = client.submit("x = [1,2,3,4,5,6,7,8,9]");
    final AtomicInteger counter = new AtomicInteger(0);
    results1.stream().map(i -> i.get(Integer.class) * 2)
            .forEach(i -> assertEquals(counter.incrementAndGet() * 2, Integer.parseInt(i.toString())));

    final ResultSet results2 = client.submit("x[0]+1");
    assertEquals(2, results2.all().get().get(0).getInt());

    // explicitly close the session
    client.close();

    // wait past automatic session expiration
    Thread.sleep(3500);
    try {
        // the original session should be dead so this call will open a new session with the same name but fail
        // because the state is now gone - x is an invalid property
        client.submit("x[1]+2").all().get();
        fail("Session should be dead");
    } catch (Exception ex) {
        final Throwable root = ExceptionUtils.getRootCause(ex);
        assertThat(root, instanceOf(ConnectionException.class));
    } finally {
        cluster.close();
    }

    assertEquals(1, recordingAppender.getMessages().stream()
            .filter(msg -> msg.equals("INFO - Session shouldCloseSessionOnceOnRequest closed\n")).count());
}
From source file: com.cloudera.livy.client.local.rpc.TestRpc.java
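Verifies that an RPC close listener fires exactly once even though close() is called twice; the AtomicInteger tallies the callbacks.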
@Test
public void testCloseListener() throws Exception {
    RpcServer server = autoClose(new RpcServer(emptyConfig));
    Rpc[] rpcs = createRpcConnection(server);
    Rpc client = rpcs[1];

    final AtomicInteger closeCount = new AtomicInteger();
    client.addListener(new Rpc.Listener() {
        @Override
        public void rpcClosed(Rpc rpc) {
            closeCount.incrementAndGet();
        }
    });

    client.close();
    client.close();
    assertEquals(1, closeCount.get());
}
From source file: com.palantir.docker.compose.DockerComposeRuleShould.java
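A health check that succeeds only on its first invocation: incrementAndGet() == 1 distinguishes the first call from any repeat.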
@Test
public void pass_wait_for_service_when_check_is_true() throws IOException, InterruptedException {
    AtomicInteger timesCheckCalled = new AtomicInteger(0);
    withComposeExecutableReturningContainerFor("db");
    HealthCheck<Container> checkCalledOnce = (container) ->
            SuccessOrFailure.fromBoolean(timesCheckCalled.incrementAndGet() == 1, "not called once yet");
    DockerComposeRule.builder().from(rule).waitingForService("db", checkCalledOnce).build().before();
    assertThat(timesCheckCalled.get(), is(1));
}
From source file: org.apache.tinkerpop.gremlin.server.GremlinServerSessionIntegrateTest.java
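The same in-order streaming assertion as above, here used while exercising the server-side session timeout.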
@Test
public void shouldHaveTheSessionTimeout() throws Exception {
    final Cluster cluster = Cluster.build().create();
    final Client client = cluster.connect(name.getMethodName());

    final ResultSet results1 = client.submit("x = [1,2,3,4,5,6,7,8,9]");
    final AtomicInteger counter = new AtomicInteger(0);
    results1.stream().map(i -> i.get(Integer.class) * 2)
            .forEach(i -> assertEquals(counter.incrementAndGet() * 2, Integer.parseInt(i.toString())));

    final ResultSet results2 = client.submit("x[0]+1");
    assertEquals(2, results2.all().get().get(0).getInt());

    // session times out in 3 seconds
    Thread.sleep(3500);
    try {
        // the original session should be dead so this call will open a new session with the same name but fail
        // because the state is now gone - x is an invalid property
        client.submit("x[1]+2").all().get();
        fail("Session should be dead");
    } catch (Exception ex) {
        final Throwable cause = ExceptionUtils.getCause(ex);
        assertThat(cause, instanceOf(ResponseException.class));
        assertEquals(ResponseStatusCode.SERVER_ERROR_SCRIPT_EVALUATION,
                ((ResponseException) cause).getResponseStatusCode());

        // validate that we can still send messages to the server
        assertEquals(2, client.submit("1+1").all().join().get(0).getInt());
    } finally {
        cluster.close();
    }

    assertEquals(1, recordingAppender.getMessages().stream()
            .filter(msg -> msg.equals("INFO - Session shouldHaveTheSessionTimeout closed\n")).count());
}
From source file: ufo.remote.calls.benchmark.client.caller.activemq.ActiveMQTester.java
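Counts failed asynchronous requests from Camel callbacks; the AtomicInteger is incremented from callback threads while a CountDownLatch tracks overall completion.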
@Override
protected void startTest(final TesterResult result) {
    ProducerTemplate producerTemplate = camelContext.createProducerTemplate();
    producerTemplate.setExecutorService(Executors.newFixedThreadPool(20));
    String url = ActiveMQApacheCamelConfig.JMS_NAME
            + ":queue:echo?deliveryPersistent=false&replyToDeliveryPersistent=false";
    AtomicInteger failures = new AtomicInteger(0);
    CountDownLatch latch = new CountDownLatch(result.totalCalls);
    for (int i = 0; i < result.totalCalls; i++) {
        producerTemplate.asyncCallbackRequestBody(url, result.message, new Synchronization() {
            @Override
            public void onFailure(final Exchange exchange) {
                failures.incrementAndGet();
                latch.countDown();
            }

            @Override
            public void onComplete(final Exchange exchange) {
                if (logger.isDebugEnabled()) {
                    logger.debug("Received message [{}]", exchange.getIn().getBody());
                }
                latch.countDown();
            }
        });
    }
    try {
        latch.await();
    } catch (InterruptedException e) {
        throw new RuntimeException(e);
    }
    result.failures = failures.get();
}