Example usage for java.util.concurrent.atomic AtomicLong incrementAndGet

Introduction

This page collects example usages of java.util.concurrent.atomic.AtomicLong#incrementAndGet from open-source projects.

Prototype

public final long incrementAndGet() 

Document

Atomically increments the current value, with memory effects as specified by VarHandle#getAndAdd, and returns the updated value.
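
Before the project examples, a minimal self-contained sketch of the call: incrementAndGet() atomically adds one to the counter and returns the value after the increment (unlike getAndIncrement(), which returns the value before). The class name and thread counts here are illustrative.

import java.util.concurrent.atomic.AtomicLong;

public class IncrementAndGetExample {
    public static void main(String[] args) throws InterruptedException {
        // Shared counter; incrementAndGet() is atomic, so no external locking is needed.
        final AtomicLong counter = new AtomicLong(0);

        Runnable task = () -> {
            for (int i = 0; i < 1_000; i++) {
                // Returns the value *after* the increment.
                long updated = counter.incrementAndGet();
                if (updated % 500 == 0) {
                    System.out.println("reached " + updated);
                }
            }
        };

        Thread t1 = new Thread(task);
        Thread t2 = new Thread(task);
        t1.start();
        t2.start();
        t1.join();
        t2.join();

        // Always prints 2000: each of the 2000 increments is applied exactly once.
        System.out.println("final count = " + counter.get());
    }
}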

Usage

From source file:io.pravega.segmentstore.server.containers.StreamSegmentMapperTests.java

private void setupOperationLog(TestContext context) {
    AtomicLong seqNo = new AtomicLong();
    context.operationLog.addHandler = op -> {
        long currentSeqNo = seqNo.incrementAndGet();
        if (op instanceof StreamSegmentMapOperation) {
            StreamSegmentMapOperation mapOp = (StreamSegmentMapOperation) op;
            if (mapOp.getStreamSegmentId() == ContainerMetadata.NO_STREAM_SEGMENT_ID) {
                mapOp.setStreamSegmentId(currentSeqNo);
            }

            UpdateableSegmentMetadata segmentMetadata = context.metadata
                    .mapStreamSegmentId(mapOp.getStreamSegmentName(), mapOp.getStreamSegmentId());
            segmentMetadata.setStorageLength(0);
            segmentMetadata.setDurableLogLength(mapOp.getLength());
            if (mapOp.isSealed()) {
                segmentMetadata.markSealed();
            }

            segmentMetadata.updateAttributes(mapOp.getAttributes());
        } else if (op instanceof TransactionMapOperation) {
            TransactionMapOperation mapOp = (TransactionMapOperation) op;
            if (mapOp.getStreamSegmentId() == ContainerMetadata.NO_STREAM_SEGMENT_ID) {
                mapOp.setStreamSegmentId(currentSeqNo);
            }

            UpdateableSegmentMetadata segmentMetadata = context.metadata.mapStreamSegmentId(
                    mapOp.getStreamSegmentName(), mapOp.getStreamSegmentId(), mapOp.getParentStreamSegmentId());
            segmentMetadata.setStorageLength(0);
            segmentMetadata.setDurableLogLength(mapOp.getLength());
            if (mapOp.isSealed()) {
                segmentMetadata.markSealed();
            }

            segmentMetadata.updateAttributes(mapOp.getAttributes());
        }

        return CompletableFuture.completedFuture(currentSeqNo);
    };
}

From source file:com.pinterest.pinlater.client.PinLaterQueryIssuer.java

private void issueEnqueueRequests(PinLater.ServiceIface iface) throws InterruptedException {
    Preconditions.checkNotNull(queueName, "Queue was not specified.");
    final AtomicLong queriesIssued = new AtomicLong(0);
    final Semaphore permits = new Semaphore(concurrency);
    while (numQueries == -1 || queriesIssued.get() < numQueries) {
        final PinLaterEnqueueRequest request = new PinLaterEnqueueRequest();
        request.setQueueName(queueName);
        for (int i = 0; i < batchSize; i++) {
            PinLaterJob job = new PinLaterJob(
                    ByteBuffer.wrap(new String("task_" + random.nextInt(Integer.MAX_VALUE)).getBytes()));
            job.setPriority(priority);
            request.addToJobs(job);
        }
        final long startTimeNanos = System.nanoTime();
        queriesIssued.incrementAndGet();
        permits.acquire();
        iface.enqueueJobs(REQUEST_CONTEXT, request)
                .respond(new Function<Try<PinLaterEnqueueResponse>, BoxedUnit>() {
                    @Override
                    public BoxedUnit apply(Try<PinLaterEnqueueResponse> responseTry) {
                        permits.release();
                        statsLogger
                                .requestComplete(Duration.fromNanoseconds(System.nanoTime() - startTimeNanos));
                        if (responseTry.isThrow()) {
                            LOG.info("Exception for request: " + request + " : " + ((Throw) responseTry).e());
                        }
                        return BoxedUnit.UNIT;
                    }
                });
    }
    permits.acquire(concurrency);
    LOG.info("Enqueue queries issued: " + queriesIssued);
}

From source file:com.ning.arecibo.collector.persistent.TestTimelineAggregator.java

private void checkSamplesForATimeline(final Integer startTimeMinutesAgo, final Integer endTimeMinutesAgo,
        final long expectedChunks) throws InterruptedException {
    final AtomicLong timelineChunkSeen = new AtomicLong(0);

    timelineDAO.getSamplesByHostIdsAndSampleKindIds(ImmutableList.<Integer>of(hostId),
            ImmutableList.<Integer>of(minHeapUsedKindId, maxHeapUsedKindId),
            START_TIME.minusMinutes(startTimeMinutesAgo), START_TIME.minusMinutes(endTimeMinutesAgo),
            new TimelineChunkConsumer() {

                @Override
                public void processTimelineChunk(final TimelineChunk chunk) {
                    Assert.assertEquals((Integer) chunk.getHostId(), hostId);
                    Assert.assertTrue(chunk.getSampleKindId() == minHeapUsedKindId
                            || chunk.getSampleKindId() == maxHeapUsedKindId);
                    timelineChunkSeen.incrementAndGet();
                }
            });

    Assert.assertEquals(timelineChunkSeen.get(), expectedChunks);
}

From source file:com.github.jackygurui.vertxredissonrepository.repository.CounterConcurrentWaterfallTest.java

@Test
public void test5CounterConcurrentWaterfall(TestContext context) throws Exception {
    Async async = context.async();
    HanyuPinyin.convert("");//warm up
    StopWatch sw = new StopWatch();
    sw.start();
    int records = 100;
    org.simondean.vertx.async.Async.<Long>series().task(customerRepository::totalCount).task(t -> {
        try {
            JsonNode source = JsonLoader.fromResource("/Customer.json");
            AtomicLong counter = new AtomicLong(0);
            IntStream.rangeClosed(1, records).parallel().forEach(e -> {
                JsonObject clone = new JsonObject(Json.encode(source));
                clone.getJsonObject("personalDetails").put("phoneNumber",
                        ((Long.parseLong(clone.getJsonObject("personalDetails").getString("phoneNumber"))
                                + 10000 + e) + ""));
                org.simondean.vertx.async.Async.waterfall().<String>task(tt -> {
                    customerRepository.create(Json.encode(clone), tt);
                }).<Customer>task((id, tt) -> {
                    customerRepository.get(id, tt);
                }).run((AsyncResult<Customer> r) -> {
                    long ct = counter.incrementAndGet();
                    //                logger.info("Counter = " + ct + " | success = " + !r.failed());
                    if (r.succeeded()) {
                        try {
                            Customer loaded = r.result();
                            Customer c = Json.decodeValue(clone.encode(), Customer.class);
                            c.setId(loaded.getId());
                            c.getAddressDetails().setId(loaded.getId());
                            c.getPersonalDetails().setId(loaded.getId());
                            String encoded = Json.encode(c);
                            if (!r.result().equals(encoded)) {
                                logger.info(loaded.getId() + " - SOURCE : " + encoded);
                                logger.info(loaded.getId() + " - RESULT : " + r.result());
                            }
                            context.assertEquals(Json.encode(r.result()), encoded);
                        } catch (Exception ex) {
                            t.handle(Future.failedFuture(ex));
                        }
                    } else {
                        t.handle(Future.failedFuture(r.cause()));
                    }
                    if (ct == records) {
                        t.handle(Future.succeededFuture(ct));
                    }
                });
            });
        } catch (IOException e) {
            t.handle(Future.failedFuture(e));
        }
    }).task(customerRepository::totalCount).run(r -> {
        if (r.succeeded()) {
            context.assertEquals(r.result().get(0) + r.result().get(1), r.result().get(2));
            sw.stop();
            logger.info("test count: time to count then concurrently save and get " + records
                    + " customer records and count again: " + sw.getTime());
            async.complete();
        } else {
            context.fail(r.cause());
            async.complete();
        }
    });
}

From source file:com.indeed.lsmtree.recordcache.PersistentRecordCache.java

/**
 * Performs lookup for multiple keys and returns a streaming iterator to results.
 * Each element in the iterator is one of
 *  (1) an exception associated with a single lookup
 *  (2) a key value tuple
 *
 * @param keys      lookup keys
 * @param progress  (optional) an AtomicInteger for tracking progress
 * @param skipped   (optional) an AtomicInteger for tracking missing keys
 * @return          iterator of lookup results
 */
public Iterator<Either<Exception, P2<K, V>>> getStreaming(final @Nonnull Iterator<K> keys,
        final @Nullable AtomicInteger progress, final @Nullable AtomicInteger skipped) {
    log.info("starting store lookups");
    LongArrayList addressList = new LongArrayList();
    int notFound = 0;
    while (keys.hasNext()) {
        final K key = keys.next();
        final Long address;
        try {
            address = index.get(key);
        } catch (IOException e) {
            log.error("error", e);
            return Iterators.singletonIterator(Left.<Exception, P2<K, V>>of(new IndexReadException(e)));
        }
        if (address != null) {
            addressList.add(address);
        } else {
            notFound++;
        }
    }
    if (progress != null)
        progress.addAndGet(notFound);
    if (skipped != null)
        skipped.addAndGet(notFound);
    log.info("store lookups complete, sorting addresses");

    final long[] addresses = addressList.elements();
    Arrays.sort(addresses, 0, addressList.size());

    log.info("initializing store lookup iterator");
    final BlockingQueue<Runnable> taskQueue = new ArrayBlockingQueue<Runnable>(100);
    final Iterator<List<Long>> iterable = Iterators.partition(addressList.iterator(), 1000);
    final ExecutorService primerThreads = new ThreadPoolExecutor(10, 10, 0L, TimeUnit.MILLISECONDS, taskQueue,
            new NamedThreadFactory("store priming thread", true, log), new RejectedExecutionHandler() {
                @Override
                public void rejectedExecution(Runnable r, ThreadPoolExecutor executor) {
                    try {
                        taskQueue.put(r);
                    } catch (InterruptedException e) {
                        log.error("error", e);
                        throw new RuntimeException(e);
                    }
                }
            });
    final BlockingQueue<List<Either<Exception, P2<K, V>>>> completionQueue = new ArrayBlockingQueue<List<Either<Exception, P2<K, V>>>>(
            10);
    final AtomicLong runningTasks = new AtomicLong(0);
    final AtomicBoolean taskSubmitterRunning = new AtomicBoolean(true);

    new Thread(new Runnable() {
        @Override
        public void run() {
            while (iterable.hasNext()) {
                runningTasks.incrementAndGet();
                final List<Long> addressesSublist = iterable.next();
                primerThreads.submit(new FutureTask<List<Either<Exception, P2<K, V>>>>(
                        new RecordLookupTask(addressesSublist)) {
                    @Override
                    protected void done() {
                        try {
                            final List<Either<Exception, P2<K, V>>> results = get();
                            if (progress != null) {
                                progress.addAndGet(results.size());
                            }
                            completionQueue.put(results);
                        } catch (InterruptedException e) {
                            log.error("error", e);
                            throw new RuntimeException(e);
                        } catch (ExecutionException e) {
                            log.error("error", e);
                            throw new RuntimeException(e);
                        }
                    }
                });
            }
            taskSubmitterRunning.set(false);
        }
    }, "RecordLookupTaskSubmitterThread").start();

    return new Iterator<Either<Exception, P2<K, V>>>() {

        Iterator<Either<Exception, P2<K, V>>> currentIterator;

        @Override
        public boolean hasNext() {
            if (currentIterator != null && currentIterator.hasNext())
                return true;
            while (taskSubmitterRunning.get() || runningTasks.get() > 0) {
                try {
                    final List<Either<Exception, P2<K, V>>> list = completionQueue.poll(1, TimeUnit.SECONDS);
                    if (list != null) {
                        log.debug("remaining: " + runningTasks.decrementAndGet());
                        currentIterator = list.iterator();
                        if (currentIterator.hasNext())
                            return true;
                    }
                } catch (InterruptedException e) {
                    log.error("error", e);
                    throw new RuntimeException(e);
                }
            }
            primerThreads.shutdown();
            return false;
        }

        @Override
        public Either<Exception, P2<K, V>> next() {
            return currentIterator.next();
        }

        @Override
        public void remove() {
            throw new UnsupportedOperationException();
        }
    };
}

From source file:org.apache.qpid.server.jmx.mbeans.QueueMBean.java

public Long clearQueue() throws IOException, JMException {
    VirtualHost vhost = _queue.getParent(VirtualHost.class);
    final AtomicLong count = new AtomicLong();

    vhost.executeTransaction(new VirtualHost.TransactionalOperation() {
        public void withinTransaction(final VirtualHost.Transaction txn) {
            _queue.visit(new QueueEntryVisitor() {

                public boolean visit(final QueueEntry entry) {
                    final ServerMessage message = entry.getMessage();
                    if (message != null) {
                        txn.dequeue(entry);
                        count.incrementAndGet();
                    }
                    return false;
                }
            });

        }
    });
    return count.get();
}

From source file:io.pravega.service.server.containers.StreamSegmentMapperTests.java

private void setupOperationLog(TestContext context) {
    AtomicLong seqNo = new AtomicLong();
    context.operationLog.addHandler = op -> {
        if (op instanceof StreamSegmentMapOperation) {
            StreamSegmentMapOperation mapOp = (StreamSegmentMapOperation) op;
            mapOp.setStreamSegmentId(seqNo.incrementAndGet());
            UpdateableSegmentMetadata segmentMetadata = context.metadata
                    .mapStreamSegmentId(mapOp.getStreamSegmentName(), mapOp.getStreamSegmentId());
            segmentMetadata.setStorageLength(0);
            segmentMetadata.setDurableLogLength(mapOp.getLength());
            if (mapOp.isSealed()) {
                segmentMetadata.markSealed();
            }

            segmentMetadata.updateAttributes(mapOp.getAttributes());
        } else if (op instanceof TransactionMapOperation) {
            TransactionMapOperation mapOp = (TransactionMapOperation) op;
            mapOp.setStreamSegmentId(seqNo.incrementAndGet());
            UpdateableSegmentMetadata segmentMetadata = context.metadata.mapStreamSegmentId(
                    mapOp.getStreamSegmentName(), mapOp.getStreamSegmentId(), mapOp.getParentStreamSegmentId());
            segmentMetadata.setStorageLength(0);
            segmentMetadata.setDurableLogLength(mapOp.getLength());
            if (mapOp.isSealed()) {
                segmentMetadata.markSealed();
            }

            segmentMetadata.updateAttributes(mapOp.getAttributes());
        }

        return CompletableFuture.completedFuture(seqNo.incrementAndGet());
    };
}

From source file:org.apache.hadoop.hbase.coordination.ZkSplitLogWorkerCoordination.java

/**
 * endTask() can fail and the only way to recover out of it is for the
 * {@link org.apache.hadoop.hbase.master.SplitLogManager} to timeout the task node.
 * @param slt
 * @param ctr
 */
@Override
public void endTask(SplitLogTask slt, AtomicLong ctr, SplitTaskDetails details) {
    ZkSplitTaskDetails zkDetails = (ZkSplitTaskDetails) details;
    String task = zkDetails.getTaskNode();
    int taskZKVersion = zkDetails.getCurTaskZKVersion().intValue();
    try {
        if (ZKUtil.setData(watcher, task, slt.toByteArray(), taskZKVersion)) {
            LOG.info("successfully transitioned task " + task + " to final state " + slt);
            ctr.incrementAndGet();
            return;
        }
        LOG.warn("failed to transistion task " + task + " to end state " + slt
                + " because of version mismatch ");
    } catch (KeeperException.BadVersionException bve) {
        LOG.warn("transisition task " + task + " to " + slt + " failed because of version mismatch", bve);
    } catch (KeeperException.NoNodeException e) {
        LOG.fatal("logic error - end task " + task + " " + slt + " failed because task doesn't exist", e);
    } catch (KeeperException e) {
        LOG.warn("failed to end task, " + task + " " + slt, e);
    }
    SplitLogCounters.tot_wkr_final_transition_failed.incrementAndGet();
}

From source file:com.cloudera.oryx.app.serving.als.LoadBenchmark.java

@Test
public void testRecommendLoad() throws Exception {
    AtomicLong count = new AtomicLong();
    Mean meanReqTimeMS = new Mean();
    long start = System.currentTimeMillis();

    int workers = LoadTestALSModelFactory.WORKERS;
    ExecUtils.doInParallel(workers, workers, true, i -> {
        RandomGenerator random = RandomManager.getRandom(Integer.toString(i).hashCode() ^ System.nanoTime());
        for (int j = 0; j < LoadTestALSModelFactory.REQS_PER_WORKER; j++) {
            String userID = "U" + random.nextInt(LoadTestALSModelFactory.USERS);
            long callStart = System.currentTimeMillis();
            target("/recommend/" + userID).request().accept(MediaType.APPLICATION_JSON_TYPE)
                    .get(LIST_ID_VALUE_TYPE);
            long timeMS = System.currentTimeMillis() - callStart;
            synchronized (meanReqTimeMS) {
                meanReqTimeMS.increment(timeMS);
            }
            long currentCount = count.incrementAndGet();
            if (currentCount % 100 == 0) {
                log(currentCount, meanReqTimeMS, start);
            }
        }
    });

    int totalRequests = workers * LoadTestALSModelFactory.REQS_PER_WORKER;
    log(totalRequests, meanReqTimeMS, start);
}

From source file:com.twitter.distributedlog.auditor.DLAuditor.java

private long calculateLedgerSpaceUsage(BookKeeperClient bkc, final ExecutorService executorService)
        throws IOException {
    final AtomicLong totalBytes = new AtomicLong(0);
    final AtomicLong totalEntries = new AtomicLong(0);
    final AtomicLong numLedgers = new AtomicLong(0);

    LedgerManager lm = BookKeeperAccessor.getLedgerManager(bkc.get());

    final SettableFuture<Void> doneFuture = SettableFuture.create();
    final BookKeeper bk = bkc.get();

    BookkeeperInternalCallbacks.Processor<Long> collector = new BookkeeperInternalCallbacks.Processor<Long>() {
        @Override
        public void process(final Long lid, final AsyncCallback.VoidCallback cb) {
            numLedgers.incrementAndGet();
            executorService.submit(new Runnable() {
                @Override
                public void run() {
                    bk.asyncOpenLedgerNoRecovery(lid, BookKeeper.DigestType.CRC32,
                            conf.getBKDigestPW().getBytes(UTF_8),
                            new org.apache.bookkeeper.client.AsyncCallback.OpenCallback() {
                                @Override
                                public void openComplete(int rc, LedgerHandle lh, Object ctx) {
                                    final int cbRc;
                                    if (BKException.Code.OK == rc) {
                                        totalBytes.addAndGet(lh.getLength());
                                        totalEntries.addAndGet(lh.getLastAddConfirmed() + 1);
                                        cbRc = rc;
                                    } else {
                                        cbRc = BKException.Code.ZKException;
                                    }
                                    executorService.submit(new Runnable() {
                                        @Override
                                        public void run() {
                                            cb.processResult(cbRc, null, null);
                                        }
                                    });
                                }
                            }, null);
                }
            });
        }
    };
    AsyncCallback.VoidCallback finalCb = new AsyncCallback.VoidCallback() {
        @Override
        public void processResult(int rc, String path, Object ctx) {
            if (BKException.Code.OK == rc) {
                doneFuture.set(null);
            } else {
                doneFuture.setException(BKException.create(rc));
            }
        }
    };
    lm.asyncProcessLedgers(collector, finalCb, null, BKException.Code.OK, BKException.Code.ZKException);
    try {
        doneFuture.get();
        logger.info("calculated {} ledgers\n\ttotal bytes = {}\n\ttotal entries = {}",
                new Object[] { numLedgers.get(), totalBytes.get(), totalEntries.get() });
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        throw new DLInterruptedException("Interrupted on calculating ledger space : ", e);
    } catch (ExecutionException e) {
        if (e.getCause() instanceof IOException) {
            throw (IOException) (e.getCause());
        } else {
            throw new IOException("Failed to calculate ledger space : ", e.getCause());
        }
    }
    return totalBytes.get();
}