Example usage for java.util.concurrent.atomic.AtomicInteger AtomicInteger(int initialValue)

Introduction

This page collects usage examples for the java.util.concurrent.atomic.AtomicInteger constructor AtomicInteger(int initialValue).

Prototype

public AtomicInteger(int initialValue) 

Document

Creates a new AtomicInteger with the given initial value.
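
A minimal, self-contained sketch (illustrative, not taken from any of the projects below) showing the constructor together with the most common atomic operations:

import java.util.concurrent.atomic.AtomicInteger;

public class AtomicIntegerBasics {
    public static void main(String[] args) {
        // Start the counter at 10.
        AtomicInteger counter = new AtomicInteger(10);

        counter.incrementAndGet();                      // atomically 10 -> 11, returns 11
        counter.getAndAdd(5);                           // atomically 11 -> 16, returns the old value 11
        boolean swapped = counter.compareAndSet(16, 0); // true: value was 16, now 0

        System.out.println(counter.get() + " " + swapped); // prints "0 true"
    }
}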

Usage

From source file:com.linkedin.pinot.perf.QueryRunner.java

/**
 * Use multiple threads to run queries as fast as possible.
 *
 * Start {numThreads} worker threads that send queries back to back (each post is a blocking call), while the
 * main thread collects statistics and logs them periodically.
 *
 * @param conf perf benchmark driver config.
 * @param queryFile query file.
 * @param numThreads number of threads sending queries.
 * @throws Exception
 */
@SuppressWarnings("InfiniteLoopStatement")
public static void multiThreadedsQueryRunner(PerfBenchmarkDriverConf conf, String queryFile,
        final int numThreads) throws Exception {
    final long randomSeed = 123456789L;
    final Random random = new Random(randomSeed);
    final int reportIntervalMillis = 3000;

    final List<String> queries;
    try (FileInputStream input = new FileInputStream(new File(queryFile))) {
        queries = IOUtils.readLines(input);
    }

    final int numQueries = queries.size();
    final PerfBenchmarkDriver driver = new PerfBenchmarkDriver(conf);
    final AtomicInteger counter = new AtomicInteger(0);
    final AtomicLong totalResponseTime = new AtomicLong(0L);
    final ExecutorService executorService = Executors.newFixedThreadPool(numThreads);

    final DescriptiveStatistics stats = new DescriptiveStatistics();
    final CountDownLatch latch = new CountDownLatch(numThreads);

    for (int i = 0; i < numThreads; i++) {
        executorService.submit(new Runnable() {
            @Override
            public void run() {
                try {
                    for (int j = 0; j < numQueries; j++) {
                        String query = queries.get(random.nextInt(numQueries));
                        long startTime = System.currentTimeMillis();
                        try {
                            driver.postQuery(query);
                            long clientTime = System.currentTimeMillis() - startTime;
                            synchronized (stats) {
                                stats.addValue(clientTime);
                            }

                            counter.getAndIncrement();
                            totalResponseTime.getAndAdd(clientTime);
                        } catch (Exception e) {
                            LOGGER.error("Caught exception while running query: {}", query, e);
                            return;
                        }
                    }
                } finally {
                    // Count down even on the early return above; otherwise latch.getCount()
                    // never reaches zero and the reporting loop below spins forever.
                    latch.countDown();
                }
            }
        });
    }

    executorService.shutdown();

    int iter = 0;
    long startTime = System.currentTimeMillis();
    while (latch.getCount() > 0) {
        Thread.sleep(reportIntervalMillis);
        double timePassedSeconds = ((double) (System.currentTimeMillis() - startTime)) / MILLIS_PER_SECOND;
        int count = counter.get();
        double avgResponseTime = ((double) totalResponseTime.get()) / count;
        LOGGER.info("Time Passed: {}s, Query Executed: {}, QPS: {}, Avg Response Time: {}ms", timePassedSeconds,
                count, count / timePassedSeconds, avgResponseTime);

        iter++;
        if (iter % 10 == 0) {
            printStats(stats);
        }
    }

    printStats(stats);
}
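
The example above pairs an AtomicInteger (query count) with an AtomicLong (total latency) so the reporting thread can compute QPS without taking a lock. A distilled sketch of that polling pattern (class name and numbers are illustrative):

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;

public class ThroughputReporter {
    public static void main(String[] args) throws InterruptedException {
        final AtomicInteger completed = new AtomicInteger(0);
        ExecutorService pool = Executors.newFixedThreadPool(4);

        // Workers record progress with a lock-free increment.
        for (int i = 0; i < 4; i++) {
            pool.submit(() -> {
                for (int j = 0; j < 1_000; j++) {
                    completed.incrementAndGet(); // one unit of work done
                }
            });
        }
        pool.shutdown();

        // The main thread polls the shared counter while workers run.
        while (!pool.awaitTermination(100, TimeUnit.MILLISECONDS)) {
            System.out.println("completed so far: " + completed.get());
        }
        System.out.println("total: " + completed.get()); // total: 4000
    }
}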

From source file:com.alibaba.rocketmq.filtersrv.FiltersrvStartup.java

public static FiltersrvController createController(String[] args) {
    System.setProperty(RemotingCommand.RemotingVersionKey, Integer.toString(MQVersion.CurrentVersion));

    // Socket send buffer size
    if (null == System.getProperty(NettySystemConfig.SystemPropertySocketSndbufSize)) {
        NettySystemConfig.SocketSndbufSize = 65535;
    }

    // Socket receive buffer size
    if (null == System.getProperty(NettySystemConfig.SystemPropertySocketRcvbufSize)) {
        NettySystemConfig.SocketRcvbufSize = 1024;
    }

    try {
        // Parse the command line
        Options options = ServerUtil.buildCommandlineOptions(new Options());
        final CommandLine commandLine = ServerUtil.parseCmdLine("mqfiltersrv", args,
                buildCommandlineOptions(options), new PosixParser());
        if (null == commandLine) {
            System.exit(-1);
            return null;
        }

        // Initialize the configuration
        final FiltersrvConfig filtersrvConfig = new FiltersrvConfig();
        final NettyServerConfig nettyServerConfig = new NettyServerConfig();

        if (commandLine.hasOption('c')) {
            String file = commandLine.getOptionValue('c');
            if (file != null) {
                InputStream in = new BufferedInputStream(new FileInputStream(file));
                Properties properties = new Properties();
                properties.load(in);
                MixAll.properties2Object(properties, filtersrvConfig);
                System.out.println("load config properties file OK, " + file);
                in.close();

                String port = properties.getProperty("listenPort");
                if (port != null) {
                    filtersrvConfig.setConnectWhichBroker(String.format("127.0.0.1:%s", port));
                }
            }
        }

        // Listen port 0: an available port is chosen automatically
        nettyServerConfig.setListenPort(0);

        nettyServerConfig.setServerAsyncSemaphoreValue(filtersrvConfig.getFsServerAsyncSemaphoreValue());
        nettyServerConfig
                .setServerCallbackExecutorThreads(filtersrvConfig.getFsServerCallbackExecutorThreads());
        nettyServerConfig.setServerWorkerThreads(filtersrvConfig.getFsServerWorkerThreads());

        // Print the effective configuration and exit
        if (commandLine.hasOption('p')) {
            MixAll.printObjectProperties(null, filtersrvConfig);
            MixAll.printObjectProperties(null, nettyServerConfig);
            System.exit(0);
        }

        MixAll.properties2Object(ServerUtil.commandLine2Properties(commandLine), filtersrvConfig);

        if (null == filtersrvConfig.getRocketmqHome()) {
            System.out.println("Please set the " + MixAll.ROCKETMQ_HOME_ENV
                    + " variable in your environment to match the location of the RocketMQ installation");
            System.exit(-2);
        }

        // Initialize Logback
        LoggerContext lc = (LoggerContext) LoggerFactory.getILoggerFactory();
        JoranConfigurator configurator = new JoranConfigurator();
        configurator.setContext(lc);
        lc.reset();
        configurator.doConfigure(filtersrvConfig.getRocketmqHome() + "/conf/logback_filtersrv.xml");
        log = LoggerFactory.getLogger(LoggerName.FiltersrvLoggerName);

        // Create and initialize the controller
        final FiltersrvController controller = new FiltersrvController(filtersrvConfig, nettyServerConfig);
        boolean initResult = controller.initialize();
        if (!initResult) {
            controller.shutdown();
            System.exit(-3);
        }

        Runtime.getRuntime().addShutdownHook(new Thread(new Runnable() {
            private volatile boolean hasShutdown = false;
            private AtomicInteger shutdownTimes = new AtomicInteger(0);

            @Override
            public void run() {
                synchronized (this) {
                    log.info("shutdown hook was invoked, " + this.shutdownTimes.incrementAndGet());
                    if (!this.hasShutdown) {
                        this.hasShutdown = true;
                        long beginTime = System.currentTimeMillis();
                        controller.shutdown();
                        long consumingTimeTotal = System.currentTimeMillis() - beginTime;
                        log.info("shutdown hook over, consuming time total(ms): " + consumingTimeTotal);
                    }
                }
            }
        }, "ShutdownHook"));

        return controller;
    } catch (Throwable e) {
        e.printStackTrace();
        System.exit(-1);
    }

    return null;
}
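
In the shutdown hook above, the AtomicInteger only counts how many times the hook fires for logging; the volatile flag is what makes the shutdown itself run once. That idempotent-shutdown idiom in isolation (class name is illustrative):

import java.util.concurrent.atomic.AtomicInteger;

public class IdempotentShutdown {
    private static volatile boolean hasShutdown = false;
    private static final AtomicInteger shutdownTimes = new AtomicInteger(0);

    public static void main(String[] args) {
        Runtime.getRuntime().addShutdownHook(new Thread(() -> {
            synchronized (IdempotentShutdown.class) {
                // Every invocation is counted and logged...
                System.out.println("shutdown hook invoked, times: " + shutdownTimes.incrementAndGet());
                // ...but the actual cleanup runs at most once.
                if (!hasShutdown) {
                    hasShutdown = true;
                    // release resources here
                }
            }
        }, "ShutdownHook"));
    }
}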

From source file:org.apache.hadoop.gateway.hdfs.dispatch.WebHdfsHaHttpClientDispatch.java

private void failoverRequest(HttpUriRequest outboundRequest, HttpServletRequest inboundRequest,
        HttpServletResponse outboundResponse, HttpResponse inboundResponse, Exception exception)
        throws IOException {
    LOG.failingOverRequest(outboundRequest.getURI().toString());
    AtomicInteger counter = (AtomicInteger) inboundRequest.getAttribute(FAILOVER_COUNTER_ATTRIBUTE);
    if (counter == null) {
        counter = new AtomicInteger(0);
    }
    inboundRequest.setAttribute(FAILOVER_COUNTER_ATTRIBUTE, counter);
    if (counter.incrementAndGet() <= maxFailoverAttempts) {
        haProvider.markFailedURL(resourceRole, outboundRequest.getURI().toString());
        //null out target url so that rewriters run again
        inboundRequest.setAttribute(AbstractGatewayFilter.TARGET_REQUEST_URL_ATTRIBUTE_NAME, null);
        URI uri = getDispatchUrl(inboundRequest);
        ((HttpRequestBase) outboundRequest).setURI(uri);
        if (failoverSleep > 0) {
            try {
                Thread.sleep(failoverSleep);
            } catch (InterruptedException e) {
                LOG.failoverSleepFailed(resourceRole, e);
            }
        }
        executeRequest(outboundRequest, inboundRequest, outboundResponse);
    } else {
        LOG.maxFailoverAttemptsReached(maxFailoverAttempts, resourceRole);
        if (inboundResponse != null) {
            writeOutboundResponse(outboundRequest, inboundRequest, outboundResponse, inboundResponse);
        } else {
            throw new IOException(exception);
        }
    }
}
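
This dispatcher stores the AtomicInteger as a request attribute so the failover budget survives re-entry: each retry goes back through the same method, finds the counter it planted earlier, and gives up once incrementAndGet() exceeds the limit. A simplified sketch of that bounded-retry pattern, with a plain map standing in for the servlet request attributes (all names are illustrative):

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicInteger;

public class BoundedRetry {
    private static final String COUNTER_KEY = "failover.counter";
    private static final int MAX_ATTEMPTS = 3;

    // Stand-in for HttpServletRequest attributes.
    private final Map<String, Object> attributes = new ConcurrentHashMap<>();

    boolean tryFailover() {
        AtomicInteger counter = (AtomicInteger) attributes.get(COUNTER_KEY);
        if (counter == null) {
            counter = new AtomicInteger(0);
            attributes.put(COUNTER_KEY, counter);
        }
        // Each call consumes one attempt; refuse once the budget is spent.
        return counter.incrementAndGet() <= MAX_ATTEMPTS;
    }

    public static void main(String[] args) {
        BoundedRetry retry = new BoundedRetry();
        for (int attempt = 1; attempt <= 5; attempt++) {
            System.out.println("attempt " + attempt + " allowed: " + retry.tryFailover());
        }
        // prints: true, true, true, false, false
    }
}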

From source file:com.rapidminer.gui.plotter.charts.HistogramColorChart.java

@Override
public void prepareData() {
    histogramDataset = new RapidHistogramDataset(isLogScale());
    categoryDataset.clear();

    if (colorColumn < 0 || valueColumn < 0) {
        return;
    }

    if (dataTable.isNominal(colorColumn)) {
        if (dataTable.isNominal(valueColumn)) {
            this.nominal = true;
            synchronized (dataTable) {
                Map<String, Map<String, AtomicInteger>> categoryValues = new LinkedHashMap<>();
                for (int i = 0; i < this.dataTable.getNumberOfValues(colorColumn); i++) {
                    String key = this.dataTable.mapIndex(colorColumn, i);
                    Map<String, AtomicInteger> innerMap = new LinkedHashMap<>();
                    for (int j = 0; j < this.dataTable.getNumberOfValues(valueColumn); j++) {
                        innerMap.put(this.dataTable.mapIndex(valueColumn, j), new AtomicInteger(0));
                    }
                    categoryValues.put(key, innerMap);
                }

                Iterator<DataTableRow> j = dataTable.iterator();
                while (j.hasNext()) {
                    DataTableRow row = j.next();
                    String colorString = this.dataTable.getValueAsString(row, colorColumn);
                    String valueString = this.dataTable.getValueAsString(row, valueColumn);
                    categoryValues.get(colorString).get(valueString).incrementAndGet();
                }

                for (String key : categoryValues.keySet()) {
                    for (String value : categoryValues.get(key).keySet()) {
                        int count = categoryValues.get(key).get(value).get();
                        categoryDataset.addValue(count, key, value);
                    }
                }
            }
        } else {
            this.nominal = false;
            synchronized (dataTable) {
                Map<String, List<Double>> classMap = new LinkedHashMap<>();
                for (int i = 0; i < this.dataTable.getNumberOfValues(colorColumn); i++) {
                    classMap.put(this.dataTable.mapIndex(colorColumn, i), new LinkedList<Double>());
                }
                Iterator<DataTableRow> i = dataTable.iterator();
                while (i.hasNext()) {
                    DataTableRow row = i.next();
                    double value = row.getValue(valueColumn);
                    if (this.absolute) {
                        value = Math.abs(value);
                    }
                    String colorValue = this.dataTable.getValueAsString(row, colorColumn);
                    List<Double> colorList = classMap.get(colorValue);
                    if (colorList != null) {
                        colorList.add(value);
                    }
                }

                for (Entry<String, List<Double>> entry : classMap.entrySet()) {
                    List<Double> valueList = entry.getValue();
                    double[] values = new double[valueList.size()];
                    int index = 0;
                    for (double v : valueList) {
                        values[index++] = v;
                    }
                    histogramDataset.addSeries(entry.getKey(), values, this.binNumber);
                }
            }

            if (dataTable.isDateTime(valueColumn)) {
                this.datetime = true;
            }
        }
    }
}
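
The histogram code uses AtomicInteger as a mutable map value, so each occurrence is a cheap in-place increment instead of a get-modify-put of boxed Integers. The same counting idiom reduced to a few lines (data is illustrative):

import java.util.LinkedHashMap;
import java.util.Map;
import java.util.concurrent.atomic.AtomicInteger;

public class FrequencyCount {
    public static void main(String[] args) {
        String[] data = {"a", "b", "a", "c", "a", "b"};

        Map<String, AtomicInteger> counts = new LinkedHashMap<>();
        for (String s : data) {
            // computeIfAbsent seeds new AtomicInteger(0) once per key;
            // after that each occurrence only bumps the existing counter.
            counts.computeIfAbsent(s, k -> new AtomicInteger(0)).incrementAndGet();
        }

        counts.forEach((k, v) -> System.out.println(k + " = " + v.get()));
        // a = 3, b = 2, c = 1
    }
}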

From source file:com.adobe.acs.commons.workflow.process.impl.SyncSmartTagsToXmpMetadataNodeProcess.java

protected void syncSmartTagsToMetadata(final Asset asset, ProcessArgs processArgs) throws PersistenceException {
    final Resource assetResource = asset.adaptTo(Resource.class);
    final ResourceResolver resourceResolver = assetResource.getResourceResolver();

    final Resource metadataResource = assetResource
            .getChild(JcrConstants.JCR_CONTENT + "/" + DamConstants.METADATA_FOLDER);
    final Resource smartTagsResource = assetResource
            .getChild(JcrConstants.JCR_CONTENT + "/" + DamConstants.METADATA_FOLDER + "/" + NN_PREDICTED_TAGS);

    if (metadataResource.getChild(processArgs.getSequenceName()) != null) {
        // Remove existing, as they will be re-created
        resourceResolver.delete(metadataResource.getChild(processArgs.getSequenceName()));
    }

    final Resource parentResource = resourceResolver.create(metadataResource, processArgs.getSequenceName(),
            new ImmutableMap.Builder<String, Object>()
                    .put(JcrConstants.JCR_PRIMARYTYPE, JcrConstants.NT_UNSTRUCTURED)
                    .put("xmpArrayType", "rdf:Seq").put("xmpNodeType", "xmpArray").put("xmpArraySize", 0L)
                    .build());

    final AtomicInteger count = new AtomicInteger(0);
    if (smartTagsResource != null) {
        StreamSupport.stream(smartTagsResource.getChildren().spliterator(), false).map(Resource::getValueMap)
                .filter(properties -> properties.get(PN_SMART_TAG_CONFIDENCE, 0D) >= processArgs
                        .getMinimumConfidence())
                .filter(properties -> StringUtils.isNotBlank(properties.get(PN_SMART_TAG_NAME, String.class)))
                .forEach(properties -> {
                    createSequenceItemResource(asset, processArgs, resourceResolver, parentResource, count,
                            properties);
                });
    }

    parentResource.adaptTo(ModifiableValueMap.class).put("xmpArraySize", count.get());

    log.info("Synced [ {} ] Smart Tags to Asset XMP Metadata structure: [ {} ] ", count.get(),
            asset.getPath() + "/jcr:content/metadata/" + processArgs.getSequenceName());
}
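
The stream pipeline above funnels its tally through an AtomicInteger because locals captured by a lambda must be effectively final; the atomic wrapper is a mutable counter that satisfies that rule. In isolation (names are illustrative):

import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;

public class LambdaCounter {
    public static void main(String[] args) {
        List<String> tags = List.of("sky", "", "tree", "water", "");
        AtomicInteger count = new AtomicInteger(0);

        // A plain int local could not be mutated inside this lambda;
        // the AtomicInteger can, and its final value is readable afterwards.
        tags.stream()
                .filter(tag -> !tag.isEmpty())
                .forEach(tag -> count.incrementAndGet());

        System.out.println("kept " + count.get() + " tags"); // kept 3 tags
    }
}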

From source file:org.glassfish.jersey.examples.sseitemstore.jaxrs.JaxrsItemStoreResourceTest.java

/**
 * Test the item addition, addition event broadcasting and item retrieval from {@link JaxrsItemStoreResource}.
 *
 * @throws Exception in case of a test failure.
 */
@Test
public void testItemsStore() throws Exception {
    final List<String> items = Collections.unmodifiableList(Arrays.asList("foo", "bar", "baz"));
    final WebTarget itemsTarget = target("items");
    final CountDownLatch latch = new CountDownLatch(items.size() * MAX_LISTENERS * 2); // countdown on all events
    final List<Queue<Integer>> indexQueues = new ArrayList<>(MAX_LISTENERS);
    final SseEventSource[] sources = new SseEventSource[MAX_LISTENERS];
    final AtomicInteger sizeEventsCount = new AtomicInteger(0);

    for (int i = 0; i < MAX_LISTENERS; i++) {
        final int id = i;
        final SseEventSource es = SseEventSource.target(itemsTarget.path("events")).build();
        sources[id] = es;

        final Queue<Integer> indexes = new ConcurrentLinkedQueue<>();
        indexQueues.add(indexes);

        es.register(inboundEvent -> {
            try {
                if (null == inboundEvent.getName()) {
                    final String data = inboundEvent.readData();
                    LOGGER.info("[-i-] SOURCE " + id + ": Received event id=" + inboundEvent.getId() + " data="
                            + data);
                    indexes.add(items.indexOf(data));
                } else if ("size".equals(inboundEvent.getName())) {
                    sizeEventsCount.incrementAndGet();
                }
            } catch (Exception ex) {
                LOGGER.log(Level.SEVERE, "[-x-] SOURCE " + id + ": Error getting event data.", ex);
                indexes.add(-999);
            } finally {
                latch.countDown();
            }
        });
    }

    try {
        open(sources);
        items.forEach((item) -> postItem(itemsTarget, item));

        assertTrue("Waiting to receive all events has timed out.",
                latch.await((1000 + MAX_LISTENERS * RECONNECT_DEFAULT) * getAsyncTimeoutMultiplier(),
                        TimeUnit.MILLISECONDS));

        // need to force disconnect on server in order for EventSource.close(...) to succeed with HttpUrlConnection
        sendCommand(itemsTarget, "disconnect");
    } finally {
        close(sources);
    }

    String postedItems = itemsTarget.request().get(String.class);
    items.forEach(
            (item) -> assertTrue("Item '" + item + "' not stored on server.", postedItems.contains(item)));

    final AtomicInteger queueId = new AtomicInteger(0);
    indexQueues.forEach((indexes) -> {
        for (int i = 0; i < items.size(); i++) {
            assertTrue("Event for '" + items.get(i) + "' not received in queue " + queueId.get(),
                    indexes.contains(i));
        }
        assertEquals("Not received the expected number of events in queue " + queueId.get(), items.size(),
                indexes.size());
        queueId.incrementAndGet();
    });

    assertEquals("Number of received 'size' events does not match.", items.size() * MAX_LISTENERS,
            sizeEventsCount.get());
}
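
A pattern worth noting in this test: the CountDownLatch signals when every asynchronous callback has fired, while the AtomicInteger tallies only the subset of events the final assertion cares about. Reduced to its core, with plain threads simulating the event callbacks (all names and numbers are illustrative):

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;

public class CallbackTally {
    public static void main(String[] args) throws InterruptedException {
        final int events = 6;
        final CountDownLatch latch = new CountDownLatch(events);
        final AtomicInteger sizeEvents = new AtomicInteger(0);

        for (int i = 0; i < events; i++) {
            final boolean isSizeEvent = i % 2 == 0;
            new Thread(() -> {
                try {
                    if (isSizeEvent) {
                        sizeEvents.incrementAndGet(); // tally only the events we assert on
                    }
                } finally {
                    latch.countDown(); // always mark the callback as delivered
                }
            }).start();
        }

        boolean allArrived = latch.await(5, TimeUnit.SECONDS);
        System.out.println(allArrived + ", size events: " + sizeEvents.get()); // true, size events: 3
    }
}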

From source file:io.cloudslang.worker.management.services.OutboundBufferImpl.java

private void drainInternal(List<Message> bufferToDrain) {
    List<Message> bulk = new ArrayList<>();
    int bulkWeight = 0;
    Map<String, AtomicInteger> logMap = new HashMap<>();
    try {
        for (Message message : bufferToDrain) {
            if (message.getClass().equals(CompoundMessage.class)) {
                bulk.addAll(((CompoundMessage) message).asList());
            } else {
                bulk.add(message);
            }
            bulkWeight += message.getWeight();

            if (logger.isDebugEnabled()) {
                if (logMap.get(message.getClass().getSimpleName()) == null)
                    logMap.put(message.getClass().getSimpleName(), new AtomicInteger(1));
                else
                    logMap.get(message.getClass().getSimpleName()).incrementAndGet();
            }

            if (bulkWeight > maxBulkWeight) {
                if (logger.isDebugEnabled())
                    logger.debug("trying to drain bulk: " + logMap.toString() + ", W:" + bulkWeight);
                drainBulk(bulk);
                bulk.clear();
                bulkWeight = 0;
                logMap.clear();
            }
        }
        // drain the last bulk
        if (logger.isDebugEnabled())
            logger.debug("trying to drain bulk: " + logMap.toString() + ", " + getStatus());
        drainBulk(bulk);
    } catch (Exception ex) {
        logger.error("Failed to drain buffer, invoking worker internal recovery... ", ex);
        recoveryManager.doRecovery();
    }
}

From source file:com.palantir.docker.compose.DockerComposeRuleShould.java

@Test
public void pass_wait_for_service_when_check_is_true_after_being_false()
        throws IOException, InterruptedException {
    AtomicInteger timesCheckCalled = new AtomicInteger(0);
    withComposeExecutableReturningContainerFor("db");
    HealthCheck<Container> checkCalledTwice = (container) -> SuccessOrFailure
            .fromBoolean(timesCheckCalled.incrementAndGet() == 2, "not called twice yet");
    DockerComposeRule.builder().from(rule).waitingForService("db", checkCalledTwice).build().before();
    assertThat(timesCheckCalled.get(), is(2));
}
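
Here the AtomicInteger counts health-check invocations so the test can both drive the check's outcome (pass on the second call) and assert the exact call count afterwards. The same probe technique as a bare JUnit 4 sketch (waitUntil is a hypothetical stand-in for the rule's wait-for-service loop):

import static org.junit.Assert.assertEquals;

import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Supplier;
import org.junit.Test;

public class InvocationCountTest {

    // Retries a boolean check until it passes; a real implementation
    // would also sleep between attempts and enforce a timeout.
    private static void waitUntil(Supplier<Boolean> check) {
        while (!check.get()) {
            Thread.onSpinWait();
        }
    }

    @Test
    public void checkIsCalledUntilItPasses() {
        AtomicInteger timesCalled = new AtomicInteger(0);
        waitUntil(() -> timesCalled.incrementAndGet() == 2); // passes on the 2nd call
        assertEquals(2, timesCalled.get());
    }
}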

From source file:org.apache.hadoop.gateway.hdfs.dispatch.WebHdfsHaDispatch.java

private void failoverRequest(HttpUriRequest outboundRequest, HttpServletRequest inboundRequest,
        HttpServletResponse outboundResponse, HttpResponse inboundResponse, Exception exception)
        throws IOException {
    LOG.failingOverRequest(outboundRequest.getURI().toString());
    AtomicInteger counter = (AtomicInteger) inboundRequest.getAttribute(FAILOVER_COUNTER_ATTRIBUTE);
    if (counter == null) {
        counter = new AtomicInteger(0);
    }
    inboundRequest.setAttribute(FAILOVER_COUNTER_ATTRIBUTE, counter);
    if (counter.incrementAndGet() <= maxFailoverAttempts) {
        haProvider.markFailedURL(RESOURCE_ROLE, outboundRequest.getURI().toString());
        //null out target url so that rewriters run again
        inboundRequest.setAttribute(AbstractGatewayFilter.TARGET_REQUEST_URL_ATTRIBUTE_NAME, null);
        URI uri = getDispatchUrl(inboundRequest);
        ((HttpRequestBase) outboundRequest).setURI(uri);
        if (failoverSleep > 0) {
            try {
                Thread.sleep(failoverSleep);
            } catch (InterruptedException e) {
                LOG.failoverSleepFailed(RESOURCE_ROLE, e);
            }
        }
        executeRequest(outboundRequest, inboundRequest, outboundResponse);
    } else {
        LOG.maxFailoverAttemptsReached(maxFailoverAttempts, RESOURCE_ROLE);
        if (inboundResponse != null) {
            writeOutboundResponse(outboundRequest, inboundRequest, outboundResponse, inboundResponse);
        } else {
            throw new IOException(exception);
        }
    }
}

From source file:org.glassfish.jersey.examples.sseitemstore.jersey.JerseyItemStoreResourceTest.java

/**
 * Test the item addition, addition event broadcasting and item retrieval from {@link ItemStoreResource}.
 *
 * @throws Exception in case of a test failure.
 */
@Test
public void testItemsStore() throws Exception {
    final List<String> items = Collections.unmodifiableList(Arrays.asList("foo", "bar", "baz"));
    final WebTarget itemsTarget = target("items");
    final CountDownLatch latch = new CountDownLatch(items.size() * MAX_LISTENERS * 2); // countdown on all events
    final List<Queue<Integer>> indexQueues = new ArrayList<>(MAX_LISTENERS);
    final EventSource[] sources = new EventSource[MAX_LISTENERS];
    final AtomicInteger sizeEventsCount = new AtomicInteger(0);

    for (int i = 0; i < MAX_LISTENERS; i++) {
        final int id = i;
        final EventSource es = EventSource.target(itemsTarget.path("events")).named("SOURCE " + id).build();
        sources[id] = es;

        final Queue<Integer> indexes = new ConcurrentLinkedQueue<>();
        indexQueues.add(indexes);

        es.register(inboundEvent -> {
            try {
                if (inboundEvent.getName() == null) {
                    final String data = inboundEvent.readData();
                    LOGGER.info("[-i-] SOURCE " + id + ": Received event id=" + inboundEvent.getId() + " data="
                            + data);
                    indexes.add(items.indexOf(data));
                } else if ("size".equals(inboundEvent.getName())) {
                    sizeEventsCount.incrementAndGet();
                }
            } catch (Exception ex) {
                LOGGER.log(Level.SEVERE, "[-x-] SOURCE " + id + ": Error getting event data.", ex);
                indexes.add(-999);
            } finally {
                latch.countDown();
            }
        });
    }

    try {
        open(sources);

        for (String item : items) {
            postItem(itemsTarget, item);
        }

        assertTrue("Waiting to receive all events has timed out.",
                latch.await(
                        (1000 + MAX_LISTENERS * EventSource.RECONNECT_DEFAULT) * getAsyncTimeoutMultiplier(),
                        TimeUnit.MILLISECONDS));

        // need to force disconnect on server in order for EventSource.close(...) to succeed with HttpUrlConnection
        sendCommand(itemsTarget, "disconnect");
    } finally {
        close(sources);
    }

    String postedItems = itemsTarget.request().get(String.class);
    for (String item : items) {
        assertTrue("Item '" + item + "' not stored on server.", postedItems.contains(item));
    }

    int queueId = 0;
    for (Queue<Integer> indexes : indexQueues) {
        for (int i = 0; i < items.size(); i++) {
            assertTrue("Event for '" + items.get(i) + "' not received in queue " + queueId,
                    indexes.contains(i));
        }
        assertEquals("Not received the expected number of events in queue " + queueId, items.size(),
                indexes.size());
        queueId++;
    }

    assertEquals("Number of received 'size' events does not match.", items.size() * MAX_LISTENERS,
            sizeEventsCount.get());
}