Example usage for java.util.concurrent.atomic AtomicInteger getAndIncrement

Introduction

On this page you can find usage examples for java.util.concurrent.atomic.AtomicInteger.getAndIncrement().

Prototype

public final int getAndIncrement() 

Document

Atomically increments the current value, with memory effects as specified by VarHandle#getAndAdd.
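
Note that getAndIncrement returns the value as it was before the increment, in contrast to incrementAndGet, which returns the updated value. A minimal, self-contained sketch (not taken from any of the projects below) illustrating the difference:

import java.util.concurrent.atomic.AtomicInteger;

public class GetAndIncrementDemo {
    public static void main(String[] args) {
        AtomicInteger counter = new AtomicInteger(0);
        int before = counter.getAndIncrement(); // returns 0; counter is now 1
        int after = counter.incrementAndGet();  // increments first; returns 2
        System.out.println(before);        // 0
        System.out.println(after);         // 2
        System.out.println(counter.get()); // 2
    }
}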

Usage

From source file:org.sonar.server.rule.index.RuleIteratorForSingleChunk.java
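
Here getAndIncrement assigns consecutive JDBC parameter indices, starting from 1, while binding each rule key to the prepared statement.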

private void setParameters(PreparedStatement stmt) throws SQLException {
    AtomicInteger index = new AtomicInteger(1);
    if (ruleKeys != null && !ruleKeys.isEmpty()) {
        for (RuleKey ruleKey : ruleKeys) {
            stmt.setString(index.getAndIncrement(), ruleKey.repository());
            stmt.setString(index.getAndIncrement(), ruleKey.rule());
        }
    }
}

From source file:com.flipkart.flux.client.intercept.TaskInterceptorTest.java
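
The counter appends a unique, increasing suffix to each event name generated by the mocked local context.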

private void setupMockLocalContext() {
    final AtomicInteger eventCounter = new AtomicInteger(0);
    doAnswer(invocation -> (((Event) invocation.getArguments()[0]).name()) + eventCounter.getAndIncrement())
            .when(localContext).generateEventName(any(Event.class));
}

From source file:org.springframework.messaging.simp.broker.OrderedMessageSenderTests.java
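
Each received message is checked against the next expected sequence number, which getAndIncrement reads and advances in a single atomic step.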

@Test
public void test() throws InterruptedException {

    int start = 1;
    int end = 1000;

    AtomicInteger index = new AtomicInteger(start);
    AtomicReference<Object> result = new AtomicReference<>();
    CountDownLatch latch = new CountDownLatch(1);

    this.channel.subscribe(message -> {
        int expected = index.getAndIncrement();
        Integer actual = (Integer) message.getHeaders().getOrDefault("seq", -1);
        if (actual != expected) {
            result.set("Expected: " + expected + ", but was: " + actual);
            latch.countDown();
            return;
        }
        if (actual == 100 || actual == 200) {
            try {
                Thread.sleep(200);
            } catch (InterruptedException ex) {
                result.set(ex.toString());
                latch.countDown();
            }
        }
        if (actual == end) {
            result.set("Done");
            latch.countDown();
        }
    });

    for (int i = start; i <= end; i++) {
        SimpMessageHeaderAccessor accessor = SimpMessageHeaderAccessor.create(SimpMessageType.MESSAGE);
        accessor.setHeader("seq", i);
        accessor.setLeaveMutable(true);
        this.sender.send(MessageBuilder.createMessage("payload", accessor.getMessageHeaders()));
    }

    latch.await(10, TimeUnit.SECONDS);
    assertEquals("Done", result.get());
}

From source file:org.springframework.integration.channel.DirectChannelTests.java
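
In this throughput benchmark the return values are discarded, so incrementAndGet and getAndIncrement serve interchangeably as per-handler message counters.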

@Test
public void testSendPerfTwoHandlers() {
    /*
     *  INT-3308 - used to run 6.4 million/sec
     *  1. Skip empty iterators as above 7.2 million/sec
     *  2. optimize for single handler 6.7 million/sec (small overhead added)
     *  3. remove LB rwlock from UnicastingDispatcher 7.2 million/sec
     *  4. Move single handler optimization to dispatcher 7.3 million/sec
     */
    DirectChannel channel = new DirectChannel();
    final AtomicInteger count1 = new AtomicInteger();
    final AtomicInteger count2 = new AtomicInteger();
    channel.subscribe(message -> count1.incrementAndGet());
    channel.subscribe(message -> count2.getAndIncrement());
    GenericMessage<String> message = new GenericMessage<String>("test");
    assertTrue(channel.send(message));
    for (int i = 0; i < 10000000; i++) {
        channel.send(message);
    }
    assertEquals(5000001, count1.get());
    assertEquals(5000000, count2.get());
}

From source file:com.olacabs.fabric.compute.sources.kafka.impl.KafkaReaderLeaderElector.java
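
The counter drives a round-robin assignment: each partition is handed to the member at position getAndIncrement() % memberCount.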

private void peerCountChange(boolean force) {
    final String memberPath = memberPathPrefix();
    List<String> members = null;
    try {
        members = curatorFramework.getChildren().forPath(memberPath);
        LOGGER.debug("Members: " + members);
        if (Sets.symmetricDifference(knownMembers, Sets.newHashSet(members)).isEmpty() && !force) {
            LOGGER.debug("No membership changes detected");
            return;
        } //.intersection(knownMembers, Sets.newHashSet(members))
    } catch (Exception e) {
        if (e instanceof KeeperException.NodeExistsException) {
            LOGGER.info("Looks like this topology/topic combination is being used for the first time");
        } else {
            LOGGER.error("Error checking for node on ZK: ", e);
        }
    }
    if (null == members) {
        LOGGER.error("No members found .. how did i come here? ZK issue?");
        return;
    }
    if (null == leaderSelector || !leaderSelector.hasLeadership()) {
        LOGGER.debug("I'm not the leader coordinator");
        return;
    }
    knownMembers = Sets.newHashSet(members);
    final List<String> finalMembers = members;
    AtomicInteger counter = new AtomicInteger(0);
    readers.keySet().forEach(partition -> {
        String selectedReader = finalMembers.get(counter.getAndIncrement() % finalMembers.size());
        LOGGER.info("[{}:{}:{}] Selected reader: {}", topology, topic, partition, selectedReader);
        final String communicatorPath = communicatorPath(partition);
        try {
            if (null == curatorFramework.checkExists().creatingParentContainersIfNeeded()
                    .forPath(communicatorPath)) {
                curatorFramework.create().creatingParentContainersIfNeeded().forPath(communicatorPath);
                LOGGER.info("[{}:{}:{}] Created communicator", topology, topic, partition);
            }
            curatorFramework.setData().forPath(communicatorPath, selectedReader.getBytes());
            LOGGER.error("Set reader at {} to {}", communicatorPath, selectedReader);
        } catch (Exception e) {
            LOGGER.error("Error setting reader value at {} to {}", communicatorPath, selectedReader, e);
        }
    });
}

From source file:lenscorrection.Distortion_Correction.java
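
A shared counter distributes work across threads: each worker repeatedly claims the next image index with getAndIncrement until all images have been processed.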

static List<Feature>[] extractSIFTFeaturesThreaded(final int numberOfImages, final String directory,
        final String[] names) {
    //extract all SIFT Features

    final List<Feature>[] siftFeatures = new ArrayList[numberOfImages];
    final Thread[] threads = MultiThreading.newThreads();
    final AtomicInteger ai = new AtomicInteger(0); // shared index of the next image to process

    IJ.showStatus("Extracting SIFT Features");
    for (int ithread = 0; ithread < threads.length; ++ithread) {
        threads[ithread] = new Thread() {
            @Override
            public void run() {
                for (int i = ai.getAndIncrement(); i < numberOfImages; i = ai.getAndIncrement()) {
                    final ArrayList<Feature> fs = new ArrayList<Feature>();
                    final ImagePlus imps = new Opener().openImage(directory + names[i + sp.firstImageIndex]);
                    imps.setProcessor(imps.getTitle(), imps.getProcessor().convertToFloat());

                    final FloatArray2DSIFT sift = new FloatArray2DSIFT(sp.sift.clone());
                    final SIFT ijSIFT = new SIFT(sift);

                    ijSIFT.extractFeatures(imps.getProcessor(), fs);

                    Collections.sort(fs);
                    IJ.log("Extracting SIFT of image: " + i);

                    siftFeatures[i] = fs;

                }
            }
        };
    }
    MultiThreading.startAndJoin(threads);

    return siftFeatures;
}

From source file:lh.api.showcase.server.util.HttpQueryUtils.java
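
Here the counter tracks retry attempts: a 401 response triggers a token refresh and a retry, up to maxRetries times.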

public static String executeQuery(URI uri, ApiAuth apiAuth, HasProxySettings proxySetting,
        HttpClientFactory httpClientFactory, final int maxRetries) throws HttpErrorResponseException {

    //logger.info("uri: " + uri.toString());

    AtomicInteger tryCounter = new AtomicInteger(0);
    while (true) {

        CloseableHttpClient httpclient = httpClientFactory.getHttpClient(proxySetting);
        HttpGet httpGet = new HttpGet(uri);
        httpGet.addHeader("Authorization", apiAuth.getAuthHeader());
        httpGet.addHeader("Accept", "application/json");

        //logger.info("auth: " + apiAuth.getAuthHeader()) ;
        //logger.info("query: " + httpGet.toString());

        CloseableHttpResponse response = null;
        try {
            response = httpclient.execute(httpGet);
            StatusLine status = response.getStatusLine();
            BufferedHttpEntity entity = new BufferedHttpEntity(response.getEntity());
            String json = IOUtils.toString(entity.getContent(), "UTF8");
            EntityUtils.consume(entity);
            //logger.info("response: " + json);

            // check for errors
            if (status != null && status.getStatusCode() > 299) {
                if (status.getStatusCode() == 401) {
                    // token has probably expired
                    logger.info("Authentication Error. Token will be refreshed");
                    if (tryCounter.getAndIncrement() < maxRetries) {
                        if (apiAuth.updateAccessToken()) {
                            logger.info("Token successfully refreshed");
                            // we retry with the new token
                            logger.info("Retry number " + tryCounter.get());
                            continue;
                        }
                    }
                }
                throw new HttpErrorResponseException(status.getStatusCode(), status.getReasonPhrase(), json);
            }
            return json;
        } catch (IOException e) {
            logger.severe(e.getMessage());
            break;
        } finally {
            try {
                if (response != null) {
                    response.close();
                }
            } catch (IOException e) {
                logger.log(Level.SEVERE, e.getMessage());
            }
        }
    }
    return null;
}

From source file:com.streamsets.pipeline.stage.processor.tensorflow.TensorFlowProcessor.java
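
The counter walks the list of output tensors in step with the configured outputs, pairing each output config with the next tensor.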

private LinkedHashMap<String, Field> createOutputFieldValue(List<Tensor<?>> tensorOutput) {
    LinkedHashMap<String, Field> outputTensorFieldMap = new LinkedHashMap<>();
    final AtomicInteger tensorIncrementor = new AtomicInteger(0);
    conf.outputConfigs.forEach(outputConfig -> {
        try (Tensor t = tensorOutput.get(tensorIncrementor.getAndIncrement())) {
            TensorDataTypeSupport dtSupport = TensorTypeSupporter.INSTANCE
                    .getTensorDataTypeSupport(t.dataType());
            Field field = dtSupport.createFieldFromTensor(t);
            outputTensorFieldMap.put(outputConfig.operation + "_" + outputConfig.index, field);
        }
    });
    return outputTensorFieldMap;
}

From source file:com.linkedin.pinot.perf.QueryRunner.java
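
Worker threads share the counter to tally completed queries; the main thread reads it periodically to report QPS and average response time.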

/**
 * Use multiple threads to run queries as fast as possible.
 *
 * Start {numThreads} worker threads to send queries (blocking call) back to back, and use the main thread to collect
 * the statistic information and log them periodically.
 *
 * @param conf perf benchmark driver config.
 * @param queryFile query file.
 * @param numThreads number of threads sending queries.
 * @throws Exception
 */
@SuppressWarnings("InfiniteLoopStatement")
public static void multiThreadedsQueryRunner(PerfBenchmarkDriverConf conf, String queryFile,
        final int numThreads) throws Exception {
    final long randomSeed = 123456789L;
    final Random random = new Random(randomSeed);
    final int reportIntervalMillis = 3000;

    final List<String> queries;
    try (FileInputStream input = new FileInputStream(new File(queryFile))) {
        queries = IOUtils.readLines(input);
    }

    final int numQueries = queries.size();
    final PerfBenchmarkDriver driver = new PerfBenchmarkDriver(conf);
    final AtomicInteger counter = new AtomicInteger(0);
    final AtomicLong totalResponseTime = new AtomicLong(0L);
    final ExecutorService executorService = Executors.newFixedThreadPool(numThreads);

    final DescriptiveStatistics stats = new DescriptiveStatistics();
    final CountDownLatch latch = new CountDownLatch(numThreads);

    for (int i = 0; i < numThreads; i++) {
        executorService.submit(new Runnable() {
            @Override
            public void run() {
                for (int j = 0; j < numQueries; j++) {
                    String query = queries.get(random.nextInt(numQueries));
                    long startTime = System.currentTimeMillis();
                    try {
                        driver.postQuery(query);
                        long clientTime = System.currentTimeMillis() - startTime;
                        synchronized (stats) {
                            stats.addValue(clientTime);
                        }

                        counter.getAndIncrement();
                        totalResponseTime.getAndAdd(clientTime);
                    } catch (Exception e) {
                        LOGGER.error("Caught exception while running query: {}", query, e);
                        return;
                    }
                }
                latch.countDown();
            }
        });
    }

    executorService.shutdown();

    int iter = 0;
    long startTime = System.currentTimeMillis();
    while (latch.getCount() > 0) {
        Thread.sleep(reportIntervalMillis);
        double timePassedSeconds = ((double) (System.currentTimeMillis() - startTime)) / MILLIS_PER_SECOND;
        int count = counter.get();
        double avgResponseTime = ((double) totalResponseTime.get()) / count;
        LOGGER.info("Time Passed: {}s, Query Executed: {}, QPS: {}, Avg Response Time: {}ms", timePassedSeconds,
                count, count / timePassedSeconds, avgResponseTime);

        iter++;
        if (iter % 10 == 0) {
            printStats(stats);
        }
    }

    printStats(stats);
}

From source file:org.commonjava.indy.ftest.core.content.ReDownloadOnContentTransferExceptionTest.java
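
The counter records how many times the mock server has been hit, so the first request is served a truncated body and subsequent requests the full content.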

@Test
public void run() throws Exception {
    final String path = "org/foo/bar/maven-metadata.xml";

    final AtomicInteger count = new AtomicInteger(0);
    server.expect("GET", server.formatUrl(STORE, path), (request, response) -> {
        response.setStatus(200);
        response.setHeader("Content-Length", Integer.toString(responseContent.length()));

        int idx = count.getAndIncrement();
        if (idx < 1) {

            try {
                logger.info("ContenlengthExpectationHandlerExecutor call index =" + idx + " url:"
                        + request.getRequestURI());

                response.getWriter().write(responseContent.substring(0, responseContent.length() / 2));
            } catch (Throwable t) {
                throw new ServletException(t.getMessage());
            }

        } else {
            logger.info("ContenlengthExpectationHandlerExecutor call index =  " + idx + " url:"
                    + request.getRequestURI());

            response.getWriter().write(responseContent);
        }
    });

    RemoteRepository remote = new RemoteRepository(MavenPackageTypeDescriptor.MAVEN_PKG_KEY, STORE,
            server.formatUrl(STORE));

    //        remote.setMetadata( Location.CONNECTION_TIMEOUT_SECONDS, Integer.toString( -1 ) );
    client.stores().create(remote, "adding remote", RemoteRepository.class);

    StoreKey sk = new StoreKey(MavenPackageTypeDescriptor.MAVEN_PKG_KEY, StoreType.remote, STORE);

    assertThat(client.content().get(sk, path), nullValue());

    String result = IOUtils.toString(client.content().get(sk, path));

    logger.info("runWithMismacthByRemoteRespository ---- result :{}", result);
    assertThat(result, notNullValue());
}