Example usage for java.util.concurrent.atomic.AtomicInteger: the AtomicInteger(int) constructor

List of usage examples for the java.util.concurrent.atomic.AtomicInteger(int) constructor

Introduction

On this page you can find example usages of the java.util.concurrent.atomic.AtomicInteger(int) constructor.

Prototype

public AtomicInteger(int initialValue) 

Document

Creates a new AtomicInteger with the given initial value.
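
Before the real-world snippets under Usage, here is a minimal, self-contained sketch of the constructor in action. It is not drawn from any of the projects below; the class name AtomicIntegerConstructorDemo and the chosen values are purely illustrative. It shows the initial value being supplied at construction time, together with the atomic update methods that most of the examples rely on:

import java.util.concurrent.atomic.AtomicInteger;

public class AtomicIntegerConstructorDemo {
    public static void main(String[] args) throws InterruptedException {
        // Start the counter at 10 instead of the default 0.
        final AtomicInteger counter = new AtomicInteger(10);

        // Two threads increment the counter concurrently, with no locking required.
        Runnable work = counter::incrementAndGet;
        Thread t1 = new Thread(work);
        Thread t2 = new Thread(work);
        t1.start();
        t2.start();
        t1.join();
        t2.join();

        counter.addAndGet(5);               // atomically add 5
        System.out.println(counter.get());  // prints 17 (10 + 1 + 1 + 5)
    }
}

Because every read-modify-write goes through the AtomicInteger, no synchronized block is needed to keep the count consistent.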

Usage

From source file:byps.test.servlet.MyServerIF.java

@Override
public int callClientParallel(int nbOfCalls) throws RemoteException {
    if (log.isDebugEnabled())
        log.debug("callClientParallel(" + nbOfCalls);
    final ClientIF clientIF = getClientIF();
    final AtomicInteger ret = new AtomicInteger(0);
    ExecutorService tpool = Executors.newCachedThreadPool();
    for (int i = 0; i < nbOfCalls; i++) {
        Runnable run = new Runnable() {
            public void run() {
                try {
                    if (log.isDebugEnabled())
                        log.debug("clientIF.incrementInt(");
                    int v = clientIF.incrementInt(0);
                    if (log.isDebugEnabled())
                        log.debug(")clientIF.incrementInt");
                    ret.addAndGet(v);
                } catch (Exception e) {
                    log.error(e);
                }
            }
        };
        tpool.execute(run);
    }
    tpool.shutdown();
    try {
        tpool.awaitTermination(10, TimeUnit.SECONDS);
    } catch (InterruptedException e) {
        throw new BException(BExceptionC.CANCELLED, e.toString(), e);
    }
    if (log.isDebugEnabled())
        log.debug(")callClientParallel");
    return ret.get();
}

From source file:au.org.ala.bhl.service.IndexingService.java

/**
 * Indexes an item that exists in the document cache
 *
 * @param item
 */
public void indexItem(final ItemDescriptor item) {

    String itemPathStr = _docCache.getItemDirectoryPath(item.getInternetArchiveId());

    final SolrServer server = createSolrServer();

    log("Indexing pages %s for item %s", itemPathStr, item.getItemId());

    try {
        final AtomicInteger pageCount = new AtomicInteger(0);
        File itemPath = new File(itemPathStr);
        if (itemPath.exists() && itemPath.isDirectory()) {
            File f = _docCache.getPageArchiveFile(item);
            if (f.exists()) {
                _docCache.forEachItemPage(item, new CachedItemPageHandler() {

                    public void startItem(String itemId) {
                    }

                    public void onPage(String iaId, String pageId, String text) {
                        indexPage(item, pageId, text, server);
                        pageCount.incrementAndGet();

                        if (pageCount.get() % 100 == 0) {
                            try {
                                server.commit();
                            } catch (Exception ex) {
                                throw new RuntimeException(ex);
                            }
                        }
                    }

                    public void endItem(String itemId) {
                    }
                });

                if (pageCount.get() > 0) {
                    server.commit();
                    getItemsService().setItemStatus(item.getItemId(), ItemStatus.INDEXED, pageCount.get());
                    log("%s pages indexed for item: %s", pageCount, item.getItemId());
                } else {
                    log("Ignoring empty item (no pages): %s", item.getItemId());
                }
            } else {
                log("Ignoring partial or empty item (no archive file found): %s", item.getInternetArchiveId());
            }
        }

    } catch (Exception ex) {
        ex.printStackTrace();
    }
}

From source file:com.magnet.mmx.server.plugin.mmxmgmt.apns.APNSConnectionPoolImplTest.java

@Test
public void testPoolWithMultipleThreads() {
    String testAppId = "multithreadTestApp";
    /**
     * create 5 threads each sending to 100 devices
     */
    int deviceCount = 100;
    int threadCount = 5;
    Map<String, String> payload = new LinkedHashMap<String, String>(100);
    for (int i = 0; i < deviceCount; i++) {
        payload.put("device:" + (i + 1), "JSON Payload{}:" + (i + 1));
    }
    objectFactory.clearCounter();
    APNSConnectionPoolImpl pool = APNSConnectionPoolImpl.getInstance();
    //initialize the pool with connections for all apps

    //    for (int i = 0; i < appIds.length; i++) {
    //      APNSConnection conn = pool.getConnection(appIds[i], true);
    //      pool.returnConnection(conn);
    //    }

    CountDownLatch countDownLatch = new CountDownLatch(threadCount);
    Executor executor = Executors.newFixedThreadPool(threadCount, new ThreadFactory() {
        private AtomicInteger counter = new AtomicInteger(1);

        @Override
        public Thread newThread(Runnable r) {
            Thread t = new Thread(r);
            t.setName("TestThread:" + counter.getAndIncrement());
            return t;
        }
    });
    for (int i = 0; i < threadCount; i++) {
        executor.execute(new SimpleAPNSSenderThread(testAppId, true, payload, countDownLatch));
    }
    //wait for the threads to finish
    try {
        countDownLatch.await();
    } catch (InterruptedException e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
    int count = objectFactory.getCreatedCount(new APNSConnectionPoolImpl.APNSConnectionKey(testAppId, true));
    assertEquals("Object got created too many times", MAX_OBJECTS_PER_KEY, count);
}

From source file:com.insys.cfclient.nozzle.InfluxDBSender.java

@Async
public void sendBatch(List<String> messages) {
    log.debug("ENTER sendBatch");
    httpClient.setErrorHandler(new ResponseErrorHandler() {
        @Override
        public boolean hasError(ClientHttpResponse clientHttpResponse) throws IOException {
            return clientHttpResponse.getRawStatusCode() > 399;
        }

        @Override
        public void handleError(ClientHttpResponse clientHttpResponse) throws IOException {

        }
    });

    RetryTemplate retryable = new RetryTemplate();
    retryable.setBackOffPolicy(getBackOffPolicy());
    retryable.setRetryPolicy(new SimpleRetryPolicy(properties.getMaxRetries(),
            Collections.singletonMap(ResourceAccessException.class, true)));

    final AtomicInteger counter = new AtomicInteger(0);
    retryable.execute(retryContext -> {
        int count = counter.incrementAndGet();
        log.trace("Attempt {} to deliver this batch", count);
        final StringBuilder builder = new StringBuilder();
        messages.forEach(s -> builder.append(s).append("\n"));

        String body = builder.toString();

        RequestEntity<String> entity = new RequestEntity<>(body, HttpMethod.POST, getUri());

        ResponseEntity<String> response;

        response = httpClient.exchange(entity, String.class);

        if (response.getStatusCode() != HttpStatus.NO_CONTENT) {
            log.error("Failed to write logs to InfluxDB! Expected error code 204, got {}",
                    response.getStatusCodeValue());

            log.trace("Request Body: {}", body);
            log.trace("Response Body: {}", response.getBody());

        } else {
            log.debug("batch sent successfully!");
        }

        log.debug("EXIT sendBatch");

        return null;
    }, recoveryContext -> {
        log.trace("Failed after {} attempts!", counter.get());
        return null;
    });
}

From source file:net.solarnetwork.node.dao.jdbc.AbstractBatchableJdbcDao.java

private BatchResult batchProcessInternal(final BatchCallback<T> callback, final BatchOptions options) {
    final String querySql = getBatchJdbcStatement(options);
    final AtomicInteger rowCount = new AtomicInteger(0);
    getJdbcTemplate().execute(new ConnectionCallback<Object>() {

        @Override
        public net.solarnetwork.node.dao.BatchableDao.BatchResult doInConnection(Connection con)
                throws SQLException, DataAccessException {
            PreparedStatement queryStmt = null;
            ResultSet queryResult = null;
            try {
                queryStmt = con.prepareStatement(querySql,
                        (options.isUpdatable() ? ResultSet.TYPE_SCROLL_SENSITIVE : ResultSet.TYPE_FORWARD_ONLY),
                        (options.isUpdatable() ? ResultSet.CONCUR_UPDATABLE : ResultSet.CONCUR_READ_ONLY),
                        ResultSet.CLOSE_CURSORS_AT_COMMIT);
                queryResult = queryStmt.executeQuery();
                while (queryResult.next()) {
                    T entity = getBatchRowEntity(options, queryResult, rowCount.incrementAndGet());
                    BatchCallbackResult rowResult = callback.handle(entity);
                    switch (rowResult) {
                    case CONTINUE:
                        break;
                    case STOP:
                        return null;
                    case DELETE:
                        queryResult.deleteRow();
                        break;
                    case UPDATE:
                    case UPDATE_STOP:
                        updateBatchRowEntity(options, queryResult, rowCount.intValue(), entity);
                        queryResult.updateRow();
                        if (rowResult == BatchCallbackResult.UPDATE_STOP) {
                            return null;
                        }
                        break;
                    }
                }
            } finally {
                if (queryResult != null) {
                    queryResult.close();
                }
                if (queryStmt != null) {
                    queryStmt.close();
                }
            }

            return null;
        }
    });
    return new BasicBatchResult(rowCount.intValue());
}

From source file:com.indeed.lsmtree.recordlog.TestBlockCompressedRecordFile.java

public void testRandomWithReader() throws IOException {
    final BlockCompressedRecordFile<String> recordFile = createBlockCache();
    final AtomicInteger done = new AtomicInteger(8);
    for (int i = 0; i < 8; i++) {
        final int index = i;
        new Thread(new Runnable() {
            @Override
            public void run() {
                try {
                    final Random r = new Random(index);
                    for (int i = 0; i < 10000000; i++) {
                        int rand = r.nextInt(positions.size());
                        final RecordFile.Reader<String> reader = recordFile.reader(positions.get(rand));
                        assertTrue(reader.next());
                        assertEquals(reader.get(), strings.get(rand));
                        reader.close();
                    }
                } catch (IOException e) {
                    throw new RuntimeException(e);
                } finally {
                    done.decrementAndGet();
                }
            }
        }).start();
    }
    while (done.get() > 0) {
        Thread.yield();
    }
    recordFile.close();
}

From source file:com.datatorrent.bufferserver.internal.DataList.java

public DataList(final String identifier, final int blockSize, final int numberOfCacheBlocks,
        final boolean backPressureEnabled) {
    if (numberOfCacheBlocks < 1) {
        throw new IllegalArgumentException("Invalid number of Data List Memory blocks " + numberOfCacheBlocks);
    }
    this.MAX_COUNT_OF_INMEM_BLOCKS = numberOfCacheBlocks;
    numberOfInMemBlockPermits = new AtomicInteger(MAX_COUNT_OF_INMEM_BLOCKS - 1);
    this.identifier = identifier;
    this.blockSize = blockSize;
    this.backPressureEnabled = backPressureEnabled;
    first = last = new Block(identifier, blockSize);
}

From source file:com.ning.arecibo.util.timeline.samples.TestSampleCoder.java

@Test(groups = "fast")
public void testTimeRangeSampleProcessor() throws Exception {
    final DateTime startTime = new DateTime(dateFormatter.parseDateTime("2012-03-23T17:35:11.000Z"));
    final DateTime endTime = new DateTime(dateFormatter.parseDateTime("2012-03-23T17:35:17.000Z"));
    final int sampleCount = 2;

    final List<DateTime> dateTimes = ImmutableList.<DateTime>of(startTime, endTime);
    final byte[] compressedTimes = timelineCoder.compressDateTimes(dateTimes);
    final TimelineCursorImpl cursor = new TimelineCursorImpl(compressedTimes, sampleCount);
    Assert.assertEquals(cursor.getNextTime(), startTime);
    Assert.assertEquals(cursor.getNextTime(), endTime);

    // 2 x the value 12: REPEAT_BYTE, SHORT, 2, SHORT, 12 (2 bytes)
    final byte[] samples = new byte[] { (byte) 0xff, 2, 2, 0, 12 };

    final AtomicInteger samplesCount = new AtomicInteger(0);
    sampleCoder.scan(samples, compressedTimes, sampleCount, new TimeRangeSampleProcessor(startTime, endTime) {
        @Override
        public void processOneSample(final DateTime time, final SampleOpcode opcode, final Object value) {
            if (samplesCount.get() == 0) {
                Assert.assertEquals(DateTimeUtils.unixSeconds(time), DateTimeUtils.unixSeconds(startTime));
            } else {
                Assert.assertEquals(DateTimeUtils.unixSeconds(time), DateTimeUtils.unixSeconds(endTime));
            }
            samplesCount.incrementAndGet();
        }
    });
    Assert.assertEquals(samplesCount.get(), sampleCount);
}

From source file:com.sworddance.taskcontrol.TaskGroup.java

public TaskGroup(String name, Comparator<PrioritizedTask> taskComparator, FutureResultImplementor<T> result) {
    this.name = name;
    this.taskComparator = taskComparator;
    this.result = result;
    taskSequence = new AtomicInteger(0);
    tasksToBeRun = new ArrayList<PrioritizedTask>();
    locklessTasks = new ArrayList<PrioritizedTask>();
    deadTasks = new ArrayList<PrioritizedTask>();
    threadHistoryTracker = new ThreadHistoryTracker();
}

From source file:com.ikanow.aleph2.example.flume_harvester.utils.FlumeUtils.java

/** Auto-generates the flume config from an input block
 *  If it's in test mode it also deletes the trackerDir (so this can be used for purging)
 * @param bucket_config
 * @param morphlines_config_path
 * @param test_mode
 * @return
 */
public static FlumeBucketConfigBean createAutoFlumeConfig(final DataBucketBean bucket,
        final FlumeBucketConfigBean bucket_config, final boolean test_mode) {
    //TODO (ALEPH-10): eventually add support for additional shortcuts here
    //TODO (ALEPH-10): security

    final Collection<SpoolDirConfig> dirs = getSpoolDirs(bucket_config);
    final AtomicInteger counter = new AtomicInteger(0);

    if (!dirs.isEmpty()) {
        final ImmutableMap<String, String> new_flume_builder = dirs.stream()
                .reduce(ImmutableMap.<String, String>builder()
                        // defaults
                        .put("channels", "mem").put("channels:mem:capacity", "1000")
                        .put("channels:mem:transactionCapacity", "100").put("channels:mem:type", "memory"),
                        (acc, v) -> {
                            final int count = counter.incrementAndGet();

                            // (some tidy up that occurs in test mode)
                            return Optional.<ImmutableMap.Builder<String, String>>of(acc
                                    .put("sources:file_in_" + count + ":type", "spooldir")
                                    .put("sources:file_in_" + count + ":channels", "mem")
                                    .put("sources:file_in_" + count + ":trackerDir",
                                            getTrackingDirSuffix(bucket))
                                    .put("sources:file_in_" + count + ":deletePolicy",
                                            (v.delete_on_ingest() ? "immediate" : "never"))
                                    .put("sources:file_in_" + count + ":spoolDir",
                                            test_mode ? v.path() + "/" + getTestDirSuffix(bucket) : v.path())
                                    .put("sources:file_in_" + count + ":ignorePattern",
                                            Optional.ofNullable(v.ignore_pattern()).orElse("^$")))
                                    // Some optional fields
                                    .map(acc2 -> {
                                        return Optional.ofNullable(v.append_basename_field()).map(field -> acc2
                                                .put("sources:file_in_" + count + ":basenameHeader", "true")
                                                .put("sources:file_in_" + count + ":basenameHeaderKey", field))
                                                .orElse(acc);
                                    }).map(acc2 -> {
                                        return Optional.ofNullable(v.append_path_field()).map(field -> acc2
                                                .put("sources:file_in_" + count + ":fileHeader", "true")
                                                .put("sources:file_in_" + count + ":fileHeaderKey", field))
                                                .orElse(acc);
                                    }).get();
                        }, (acc1, acc2) -> acc1 // (can't happen in practice)   
                ).put("sources", StreamUtils.zipWithIndex(dirs.stream())
                        .map(i -> ("file_in_" + (1 + i.getIndex()))).collect(Collectors.joining(" ")))
                .build();

        // Clone the config with the new flume config
        return BeanTemplateUtils.clone(bucket_config)
                .with(FlumeBucketConfigBean::flume_config, new_flume_builder).done();
    } else { // Leave unchanged
        return bucket_config;
    }
}