Example usage for java.util.concurrent.atomic AtomicLong decrementAndGet

Introduction

On this page you can find example usage for java.util.concurrent.atomic.AtomicLong.decrementAndGet.

Prototype

public final long decrementAndGet() 

Document

Atomically decrements the current value, with memory effects as specified by VarHandle#getAndAdd.
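To make the contract concrete, here is a minimal, self-contained sketch (the class name DecrementDemo and the initial count are illustrative, not taken from any of the sources below): two threads share one AtomicLong, and because decrementAndGet subtracts one and returns the new value in a single atomic step, each non-negative value is claimed by exactly one thread.

import java.util.concurrent.atomic.AtomicLong;

public class DecrementDemo {
    public static void main(String[] args) throws InterruptedException {
        final AtomicLong remaining = new AtomicLong(10);

        Runnable worker = () -> {
            long v;
            // decrementAndGet atomically subtracts 1 and returns the NEW value,
            // so each non-negative value is handed out exactly once.
            while ((v = remaining.decrementAndGet()) >= 0) {
                System.out.println(Thread.currentThread().getName() + " took item " + v);
            }
        };

        Thread t1 = new Thread(worker, "worker-1");
        Thread t2 = new Thread(worker, "worker-2");
        t1.start();
        t2.start();
        t1.join();
        t2.join();

        // Each thread exits after exactly one decrement that goes negative,
        // so with two threads the counter always ends at -2.
        System.out.println("final value: " + remaining.get());
    }
}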

Usage

From source file:Main.java
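A minimal demonstration of the return-value semantics: getAndIncrement returns the old value before incrementing, while decrementAndGet returns the new value after decrementing.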

public static void main(String[] argv) {
    AtomicLong nextId = new AtomicLong(); // initial value is 0

    System.out.println(nextId.getAndIncrement()); // prints 0; value becomes 1
    System.out.println(nextId.decrementAndGet()); // value returns to 0; prints 0
}

From source file:reactor.bus.SelectorUnitTests.java
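In this throughput test the AtomicLong acts as a countdown: it starts at selectors * iterations, each consumer invocation calls decrementAndGet, and the final assertion that the counter reached zero verifies that every registered handler was selected and executed.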

@SuppressWarnings("unchecked")
private void runTest(String type, Function<Integer, Tuple2<Selector, Object>> fn) {
    final AtomicLong counter = new AtomicLong(selectors * iterations);
    Registry<Object, Consumer<?>> registry = Registries.create();

    Consumer<?> countDown = new Consumer<Object>() {
        @Override
        public void accept(Object obj) {
            counter.decrementAndGet();
        }
    };

    Selector<Object>[] sels = new Selector[selectors];
    Object[] keys = new Object[selectors];

    for (int i = 0; i < selectors; i++) {
        Tuple2<Selector, Object> tup = fn.apply(i);
        sels[i] = tup.getT1();
        keys[i] = tup.getT2();
        registry.register(sels[i], countDown);
    }

    long start = System.currentTimeMillis();
    for (int i = 0; i < selectors * iterations; i++) {
        int j = i % selectors;
        for (Registration<?, ? extends Consumer<?>> reg : registry.select(keys[j])) {
            reg.getObject().accept(null);
        }
    }
    long end = System.currentTimeMillis();
    double elapsed = (end - start);
    long throughput = Math.round((selectors * iterations) / (elapsed / 1000));
    LOG.info("{} throughput: {}M/s in {}ms", type, throughput, Math.round(elapsed));

    assertThat("All handlers have been found and executed.", counter.get() == 0);
}

From source file:jduagui.Controller.java
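After walking the file tree, decrementAndGet removes the start directory itself from the subdirectory count; if that yields -1 (no directory was ever visited, i.e. the start path was a plain file), the count is bumped back to zero.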

public static long getSize(String startPath, Map<String, Long> dirs, Map<String, Long> files)
        throws IOException {
    final AtomicLong size = new AtomicLong(0);
    final AtomicLong subdirs = new AtomicLong(0);
    final AtomicLong fs = new AtomicLong(0);
    final File f = new File(startPath);
    final String str = "";
    Path path = Paths.get(startPath);

    Files.walkFileTree(path, new SimpleFileVisitor<Path>() {
        @Override
        public FileVisitResult preVisitDirectory(Path dir, BasicFileAttributes attrs) {
            subdirs.incrementAndGet();
            return FileVisitResult.CONTINUE;
        }

        @Override
        public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
            fs.incrementAndGet();
            size.addAndGet(attrs.size());
            return FileVisitResult.CONTINUE;
        }

        @Override
        public FileVisitResult visitFileFailed(Path file, IOException exc) throws IOException {
            fs.incrementAndGet();
            return FileVisitResult.CONTINUE;
        }
    });
    if (subdirs.decrementAndGet() == -1)
        subdirs.incrementAndGet();

    if (f.isDirectory()) {
        dirs.put(startPath, subdirs.get());
        files.put(startPath, fs.get());
    }
    return size.get();
}

From source file:com.github.rinde.datgen.pdptw.DatasetGenerator.java
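currentJobs tracks scenario-generation jobs in flight: it is incremented just before a job is submitted and decremented with decrementAndGet at the top of the success callback, before deciding whether to resubmit.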

static void submitJob(final AtomicLong currentJobs, final ListeningExecutorService service,
        final ScenarioCreator job, final int numInstances, final Dataset<GeneratedScenario> dataset,
        final Map<GeneratorSettings, IdSeedGenerator> rngMap, final AtomicLong datasetSize) {

    if (service.isShutdown()) {
        return;
    }
    currentJobs.getAndIncrement();
    final ListenableFuture<GeneratedScenario> future = service.submit(job);
    Futures.addCallback(future, new FutureCallback<GeneratedScenario>() {
        @Override
        public void onSuccess(@Nullable GeneratedScenario result) {
            LOGGER.info(" - Job finished!");
            currentJobs.decrementAndGet();
            if (result == null) {
                final ScenarioCreator newJob = ScenarioCreator.create(rngMap.get(job.getSettings()).next(),
                        job.getSettings(), job.getGenerator());

                LOGGER.info(" - Job result was NULL, submitting new job");

                submitJob(currentJobs, service, newJob, numInstances, dataset, rngMap, datasetSize);
                return;
            }
            final GeneratedScenario res = verifyNotNull(result);
            if (dataset.get(res.getDynamismBin(), res.getSettings().getUrgency(), res.getSettings().getScale())
                    .size() < numInstances) {

                datasetSize.getAndIncrement();
                LOGGER.info(" - Job Putting dataset...");
                dataset.put(res.getDynamismBin(), res.getSettings().getUrgency(), res.getSettings().getScale(),
                        res);
            } else {
                // TODO check if this job should be respawned by seeing if it uses the
                // correct TSG

                // TODO respawn more tasks if currentJobs < numThreads
                final Collection<Double> dynamismLevels = job.getSettings().getDynamismRangeCenters()
                        .asMapOfRanges().values();

                boolean needMore = false;
                for (final Double d : dynamismLevels) {
                    if (dataset.get(d, res.getSettings().getUrgency(), res.getSettings().getScale())
                            .size() < numInstances) {
                        needMore = true;
                        break;
                    }
                }

                if (needMore) {
                    // respawn job

                    final ScenarioCreator newJob = ScenarioCreator.create(rngMap.get(job.getSettings()).next(),
                            job.getSettings(), job.getGenerator());

                    if (!service.isShutdown()) {
                        submitJob(currentJobs, service, newJob, numInstances, dataset, rngMap, datasetSize);
                    }
                }
            }
        }

        @Override
        public void onFailure(Throwable t) {
            throw new IllegalStateException(t);
        }
    }, MoreExecutors.directExecutor());
}

From source file:io.pravega.segmentstore.server.containers.StreamSegmentMapperTests.java
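Here decrementAndGet drives a deterministic supplier of segment ids: the counter descends from Integer.MAX_VALUE, and every other value is replaced by NO_STREAM_SEGMENT_ID so both the assigned and unassigned code paths are exercised.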

/**
 * Tests the ability of the StreamSegmentMapper to generate/return the Id of an existing StreamSegment, as well as
 * retrieving existing attributes.
 */
@Test
public void testGetOrAssignStreamSegmentId() {
    final int segmentCount = 10;
    final int transactionsPerSegment = 5;
    final long noSegmentId = ContainerMetadata.NO_STREAM_SEGMENT_ID;
    AtomicLong currentSegmentId = new AtomicLong(Integer.MAX_VALUE);
    Supplier<Long> nextSegmentId = () -> currentSegmentId.decrementAndGet() % 2 == 0 ? noSegmentId
            : currentSegmentId.get();

    @Cleanup
    TestContext context = new TestContext();

    HashSet<String> storageSegments = new HashSet<>();
    for (int i = 0; i < segmentCount; i++) {
        String segmentName = getName(i);
        storageSegments.add(segmentName);
        setAttributes(segmentName, nextSegmentId.get(), storageSegments.size() % ATTRIBUTE_COUNT, context);

        for (int j = 0; j < transactionsPerSegment; j++) {
            // There is a small chance of a name conflict here, but we don't care. As long as we get at least one
            // Transaction per segment, we should be fine.
            String transactionName = StreamSegmentNameUtils.getTransactionNameFromId(segmentName,
                    UUID.randomUUID());
            storageSegments.add(transactionName);
            setAttributes(transactionName, nextSegmentId.get(), storageSegments.size() % ATTRIBUTE_COUNT,
                    context);
        }
    }

    // We setup all necessary handlers, except the one for create. We do not need to create new Segments here.
    setupOperationLog(context);
    Predicate<String> isSealed = segmentName -> segmentName.hashCode() % 2 == 0;
    Function<String, Long> getInitialLength = segmentName -> (long) Math.abs(segmentName.hashCode());
    setupStorageGetHandler(context, storageSegments, segmentName -> new StreamSegmentInformation(segmentName,
            getInitialLength.apply(segmentName), isSealed.test(segmentName), false, new ImmutableDate()));

    // First, map all the parents (stand-alone segments).
    for (String name : storageSegments) {
        if (StreamSegmentNameUtils.getParentStreamSegmentName(name) == null) {
            long id = context.mapper.getOrAssignStreamSegmentId(name, TIMEOUT).join();
            Assert.assertNotEquals("No id was assigned for StreamSegment " + name,
                    ContainerMetadata.NO_STREAM_SEGMENT_ID, id);
            SegmentMetadata sm = context.metadata.getStreamSegmentMetadata(id);
            Assert.assertNotNull("No metadata was created for StreamSegment " + name, sm);
            long expectedLength = getInitialLength.apply(name);
            boolean expectedSeal = isSealed.test(name);
            Assert.assertEquals("Metadata does not have the expected length for StreamSegment " + name,
                    expectedLength, sm.getDurableLogLength());
            Assert.assertEquals(
                    "Metadata does not have the expected value for isSealed for StreamSegment " + name,
                    expectedSeal, sm.isSealed());

            val segmentState = context.stateStore.get(name, TIMEOUT).join();
            Map<UUID, Long> expectedAttributes = segmentState == null ? null : segmentState.getAttributes();
            SegmentMetadataComparer.assertSameAttributes(
                    "Unexpected attributes in metadata for StreamSegment " + name, expectedAttributes, sm);
        }
    }

    // Now, map all the Transactions.
    for (String name : storageSegments) {
        String parentName = StreamSegmentNameUtils.getParentStreamSegmentName(name);
        if (parentName != null) {
            long id = context.mapper.getOrAssignStreamSegmentId(name, TIMEOUT).join();
            Assert.assertNotEquals("No id was assigned for Transaction " + name,
                    ContainerMetadata.NO_STREAM_SEGMENT_ID, id);
            SegmentMetadata sm = context.metadata.getStreamSegmentMetadata(id);
            Assert.assertNotNull("No metadata was created for Transaction " + name, sm);
            long expectedLength = getInitialLength.apply(name);
            boolean expectedSeal = isSealed.test(name);
            Assert.assertEquals("Metadata does not have the expected length for Transaction " + name,
                    expectedLength, sm.getDurableLogLength());
            Assert.assertEquals(
                    "Metadata does not have the expected value for isSealed for Transaction " + name,
                    expectedSeal, sm.isSealed());

            val segmentState = context.stateStore.get(name, TIMEOUT).join();
            Map<UUID, Long> expectedAttributes = segmentState == null ? null : segmentState.getAttributes();
            SegmentMetadataComparer.assertSameAttributes(
                    "Unexpected attributes in metadata for Transaction " + name, expectedAttributes, sm);

            // Check parenthood.
            Assert.assertNotEquals("No parent defined in metadata for Transaction " + name,
                    ContainerMetadata.NO_STREAM_SEGMENT_ID, sm.getParentId());
            long parentId = context.metadata.getStreamSegmentId(parentName, false);
            Assert.assertEquals("Unexpected parent defined in metadata for Transaction " + name, parentId,
                    sm.getParentId());
        }
    }
}

From source file:io.druid.client.cache.MemcachedCache.java
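In this spymemcached MetricCollector, decrementCounter looks up (or lazily creates) the AtomicLong registered under the metric name and calls decrementAndGet on it; a monitor thread later reads the counters to emit totals and deltas.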

public static MemcachedCache create(final MemcachedCacheConfig config) {
    final ConcurrentMap<String, AtomicLong> counters = new ConcurrentHashMap<>();
    final ConcurrentMap<String, AtomicLong> meters = new ConcurrentHashMap<>();
    final AbstractMonitor monitor = new AbstractMonitor() {
        final AtomicReference<Map<String, Long>> priorValues = new AtomicReference<Map<String, Long>>(
                new HashMap<String, Long>());

        @Override
        public boolean doMonitor(ServiceEmitter emitter) {
            final Map<String, Long> priorValues = this.priorValues.get();
            final Map<String, Long> currentValues = getCurrentValues();
            final ServiceMetricEvent.Builder builder = ServiceMetricEvent.builder();
            for (Map.Entry<String, Long> entry : currentValues.entrySet()) {
                emitter.emit(builder.setDimension("memcached metric", entry.getKey())
                        .build("query/cache/memcached/total", entry.getValue()));
                final Long prior = priorValues.get(entry.getKey());
                if (prior != null) {
                    emitter.emit(builder.setDimension("memcached metric", entry.getKey())
                            .build("query/cache/memcached/delta", entry.getValue() - prior));
                }
            }

            if (!this.priorValues.compareAndSet(priorValues, currentValues)) {
                log.error("Prior value changed while I was reporting! updating anyways");
                this.priorValues.set(currentValues);
            }
            return true;
        }

        private Map<String, Long> getCurrentValues() {
            final ImmutableMap.Builder<String, Long> builder = ImmutableMap.builder();
            for (Map.Entry<String, AtomicLong> entry : counters.entrySet()) {
                builder.put(entry.getKey(), entry.getValue().get());
            }
            for (Map.Entry<String, AtomicLong> entry : meters.entrySet()) {
                builder.put(entry.getKey(), entry.getValue().get());
            }
            return builder.build();
        }
    };
    try {
        LZ4Transcoder transcoder = new LZ4Transcoder(config.getMaxObjectSize());

        // always use compression
        transcoder.setCompressionThreshold(0);

        OperationQueueFactory opQueueFactory;
        long maxQueueBytes = config.getMaxOperationQueueSize();
        if (maxQueueBytes > 0) {
            opQueueFactory = new MemcachedOperationQueueFactory(maxQueueBytes);
        } else {
            opQueueFactory = new LinkedOperationQueueFactory();
        }

        final Predicate<String> interesting = new Predicate<String>() {
            // See net.spy.memcached.MemcachedConnection.registerMetrics()
            private final Set<String> interestingMetrics = ImmutableSet.of(
                    "[MEM] Reconnecting Nodes (ReconnectQueue)",
                    //"[MEM] Shutting Down Nodes (NodesToShutdown)", // Busted
                    "[MEM] Request Rate: All", "[MEM] Average Bytes written to OS per write",
                    "[MEM] Average Bytes read from OS per read",
                    "[MEM] Average Time on wire for operations (s)",
                    "[MEM] Response Rate: All (Failure + Success + Retry)", "[MEM] Response Rate: Retry",
                    "[MEM] Response Rate: Failure", "[MEM] Response Rate: Success");

            @Override
            public boolean apply(@Nullable String input) {
                return input != null && interestingMetrics.contains(input);
            }
        };

        final MetricCollector metricCollector = new MetricCollector() {
            @Override
            public void addCounter(String name) {
                if (!interesting.apply(name)) {
                    return;
                }
                counters.putIfAbsent(name, new AtomicLong(0L));

                if (log.isDebugEnabled()) {
                    log.debug("Add Counter [%s]", name);
                }
            }

            @Override
            public void removeCounter(String name) {
                if (log.isDebugEnabled()) {
                    log.debug("Ignoring request to remove [%s]", name);
                }
            }

            @Override
            public void incrementCounter(String name) {
                if (!interesting.apply(name)) {
                    return;
                }
                AtomicLong counter = counters.get(name);
                if (counter == null) {
                    counters.putIfAbsent(name, new AtomicLong(0));
                    counter = counters.get(name);
                }
                counter.incrementAndGet();

                if (log.isDebugEnabled()) {
                    log.debug("Increment [%s]", name);
                }
            }

            @Override
            public void incrementCounter(String name, int amount) {
                if (!interesting.apply(name)) {
                    return;
                }
                AtomicLong counter = counters.get(name);
                if (counter == null) {
                    counters.putIfAbsent(name, new AtomicLong(0));
                    counter = counters.get(name);
                }
                counter.addAndGet(amount);

                if (log.isDebugEnabled()) {
                    log.debug("Increment [%s] %d", name, amount);
                }
            }

            @Override
            public void decrementCounter(String name) {
                if (!interesting.apply(name)) {
                    return;
                }
                AtomicLong counter = counters.get(name);
                if (counter == null) {
                    counters.putIfAbsent(name, new AtomicLong(0));
                    counter = counters.get(name);
                }
                counter.decrementAndGet();

                if (log.isDebugEnabled()) {
                    log.debug("Decrement [%s]", name);
                }
            }

            @Override
            public void decrementCounter(String name, int amount) {
                if (!interesting.apply(name)) {
                    return;
                }
                AtomicLong counter = counters.get(name);
                if (counter == null) {
                    counters.putIfAbsent(name, new AtomicLong(0L));
                    counter = counters.get(name);
                }
                counter.addAndGet(-amount);

                if (log.isDebugEnabled()) {
                    log.debug("Decrement [%s] %d", name, amount);
                }
            }

            @Override
            public void addMeter(String name) {
                if (!interesting.apply(name)) {
                    return;
                }
                meters.putIfAbsent(name, new AtomicLong(0L));
                if (log.isDebugEnabled()) {
                    log.debug("Adding meter [%s]", name);
                }
            }

            @Override
            public void removeMeter(String name) {
                if (!interesting.apply(name)) {
                    return;
                }
                if (log.isDebugEnabled()) {
                    log.debug("Ignoring request to remove meter [%s]", name);
                }
            }

            @Override
            public void markMeter(String name) {
                if (!interesting.apply(name)) {
                    return;
                }
                AtomicLong meter = meters.get(name);
                if (meter == null) {
                    meters.putIfAbsent(name, new AtomicLong(0L));
                    meter = meters.get(name);
                }
                meter.incrementAndGet();

                if (log.isDebugEnabled()) {
                    log.debug("Increment counter [%s]", name);
                }
            }

            @Override
            public void addHistogram(String name) {
                log.debug("Ignoring add histogram [%s]", name);
            }

            @Override
            public void removeHistogram(String name) {
                log.debug("Ignoring remove histogram [%s]", name);
            }

            @Override
            public void updateHistogram(String name, int amount) {
                log.debug("Ignoring update histogram [%s]: %d", name, amount);
            }
        };

        final ConnectionFactory connectionFactory = new MemcachedCustomConnectionFactoryBuilder()
                // 1000 repetitions gives us good distribution with murmur3_128
                // (approx < 5% difference in counts across nodes, with 5 cache nodes)
                .setKetamaNodeRepetitions(1000).setHashAlg(MURMUR3_128)
                .setProtocol(ConnectionFactoryBuilder.Protocol.BINARY)
                .setLocatorType(ConnectionFactoryBuilder.Locator.CONSISTENT).setDaemon(true)
                .setFailureMode(FailureMode.Cancel).setTranscoder(transcoder).setShouldOptimize(true)
                .setOpQueueMaxBlockTime(config.getTimeout()).setOpTimeout(config.getTimeout())
                .setReadBufferSize(config.getReadBufferSize()).setOpQueueFactory(opQueueFactory)
                .setMetricCollector(metricCollector).setEnableMetrics(MetricType.DEBUG) // Not as scary as it sounds
                .build();

        final List<InetSocketAddress> hosts = AddrUtil.getAddresses(config.getHosts());

        final Supplier<ResourceHolder<MemcachedClientIF>> clientSupplier;

        if (config.getNumConnections() > 1) {
            clientSupplier = new LoadBalancingPool<MemcachedClientIF>(config.getNumConnections(),
                    new Supplier<MemcachedClientIF>() {
                        @Override
                        public MemcachedClientIF get() {
                            try {
                                return new MemcachedClient(connectionFactory, hosts);
                            } catch (IOException e) {
                                log.error(e, "Unable to create memcached client");
                                throw Throwables.propagate(e);
                            }
                        }
                    });
        } else {
            clientSupplier = Suppliers.<ResourceHolder<MemcachedClientIF>>ofInstance(StupidResourceHolder
                    .<MemcachedClientIF>create(new MemcachedClient(connectionFactory, hosts)));
        }

        return new MemcachedCache(clientSupplier, config, monitor);
    } catch (IOException e) {
        throw Throwables.propagate(e);
    }
}

From source file:com.indeed.lsmtree.recordcache.PersistentRecordCache.java
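runningTasks counts outstanding lookup batches: the submitter thread increments it for each task it submits, and the consuming iterator's hasNext decrements it whenever a batch of results is drained from the completion queue, looping until the submitter has finished and the count drops to zero.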

/**
 * Performs lookup for multiple keys and returns a streaming iterator to results.
 * Each element in the iterator is one of
 *  (1) an exception associated with a single lookup
 *  (2) a key value tuple
 *
 * @param keys      lookup keys
 * @param progress  (optional) an AtomicInteger for tracking progress
 * @param skipped   (optional) an AtomicInteger for tracking missing keys
 * @return          iterator of lookup results
 */
public Iterator<Either<Exception, P2<K, V>>> getStreaming(final @Nonnull Iterator<K> keys,
        final @Nullable AtomicInteger progress, final @Nullable AtomicInteger skipped) {
    log.info("starting store lookups");
    LongArrayList addressList = new LongArrayList();
    int notFound = 0;
    while (keys.hasNext()) {
        final K key = keys.next();
        final Long address;
        try {
            address = index.get(key);
        } catch (IOException e) {
            log.error("error", e);
            return Iterators.singletonIterator(Left.<Exception, P2<K, V>>of(new IndexReadException(e)));
        }
        if (address != null) {
            addressList.add(address);
        } else {
            notFound++;
        }
    }
    if (progress != null)
        progress.addAndGet(notFound);
    if (skipped != null)
        skipped.addAndGet(notFound);
    log.info("store lookups complete, sorting addresses");

    final long[] addresses = addressList.elements();
    Arrays.sort(addresses, 0, addressList.size());

    log.info("initializing store lookup iterator");
    final BlockingQueue<Runnable> taskQueue = new ArrayBlockingQueue<Runnable>(100);
    final Iterator<List<Long>> iterable = Iterators.partition(addressList.iterator(), 1000);
    final ExecutorService primerThreads = new ThreadPoolExecutor(10, 10, 0L, TimeUnit.MILLISECONDS, taskQueue,
            new NamedThreadFactory("store priming thread", true, log), new RejectedExecutionHandler() {
                @Override
                public void rejectedExecution(Runnable r, ThreadPoolExecutor executor) {
                    try {
                        taskQueue.put(r);
                    } catch (InterruptedException e) {
                        log.error("error", e);
                        throw new RuntimeException(e);
                    }
                }
            });
    final BlockingQueue<List<Either<Exception, P2<K, V>>>> completionQueue = new ArrayBlockingQueue<List<Either<Exception, P2<K, V>>>>(
            10);
    final AtomicLong runningTasks = new AtomicLong(0);
    final AtomicBoolean taskSubmitterRunning = new AtomicBoolean(true);

    new Thread(new Runnable() {
        @Override
        public void run() {
            while (iterable.hasNext()) {
                runningTasks.incrementAndGet();
                final List<Long> addressesSublist = iterable.next();
                primerThreads.submit(new FutureTask<List<Either<Exception, P2<K, V>>>>(
                        new RecordLookupTask(addressesSublist)) {
                    @Override
                    protected void done() {
                        try {
                            final List<Either<Exception, P2<K, V>>> results = get();
                            if (progress != null) {
                                progress.addAndGet(results.size());
                            }
                            completionQueue.put(results);
                        } catch (InterruptedException e) {
                            log.error("error", e);
                            throw new RuntimeException(e);
                        } catch (ExecutionException e) {
                            log.error("error", e);
                            throw new RuntimeException(e);
                        }
                    }
                });
            }
            taskSubmitterRunning.set(false);
        }
    }, "RecordLookupTaskSubmitterThread").start();

    return new Iterator<Either<Exception, P2<K, V>>>() {

        Iterator<Either<Exception, P2<K, V>>> currentIterator;

        @Override
        public boolean hasNext() {
            if (currentIterator != null && currentIterator.hasNext())
                return true;
            while (taskSubmitterRunning.get() || runningTasks.get() > 0) {
                try {
                    final List<Either<Exception, P2<K, V>>> list = completionQueue.poll(1, TimeUnit.SECONDS);
                    if (list != null) {
                        log.debug("remaining: " + runningTasks.decrementAndGet());
                        currentIterator = list.iterator();
                        if (currentIterator.hasNext())
                            return true;
                    }
                } catch (InterruptedException e) {
                    log.error("error", e);
                    throw new RuntimeException(e);
                }
            }
            primerThreads.shutdown();
            return false;
        }

        @Override
        public Either<Exception, P2<K, V>> next() {
            return currentIterator.next();
        }

        @Override
        public void remove() {
            throw new UnsupportedOperationException();
        }
    };
}

From source file:com.github.jackygurui.vertxredissonrepository.repository.Impl.RedisRepositoryImpl.java
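stack counts nested-entity persists still in flight: it is incremented before each nested persist starts and decremented in the persist callback; when it reaches zero after iteration has finished, the surrounding batch is finalized.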

private void persistBlocking(String id, JsonObject data, RBatch redissonBatch,
        Handler<AsyncResult<Boolean>> resultHandler) {
    RBatch batch = redissonBatch == null ? redissonWrite.createBatch() : redissonBatch;
    AtomicBoolean failed = new AtomicBoolean(false);
    try {
        BeanMap pMap = new BeanMap(cls.newInstance());
        //remove the indexes;
        if (isRedisEntity()) {
            AtomicBoolean finished = new AtomicBoolean(false);
            AtomicBoolean hasNested = new AtomicBoolean(false);
            AtomicLong stack = new AtomicLong();
            pMap.forEach((k, v) -> {
                if ("class".equals(k)) {
                    return;
                }
                Class<?> type = pMap.getType((String) k);
                if (!isRedisEntity(type)) {
                    //recreate the indexes;
                    if ("id".equals(k)) {
                        batch.getMap(getStorageKey(), StringCodec.INSTANCE).fastPutAsync(id, id);
                    } else {
                        batch.getMap(getStorageKey((String) k)).fastPutAsync(id, data.getValue((String) k));
                    }
                } else {
                    hasNested.set(true);
                    stack.incrementAndGet();
                    RedisRepositoryImpl<?> innerRepo;
                    try {
                        innerRepo = (RedisRepositoryImpl) factory.instance(type);
                    } catch (RepositoryException e) {
                        throw new RuntimeException(e);
                    }
                    JsonObject value = data.getJsonObject((String) k);
                    final boolean newOne = !value.containsKey("id") || value.getString("id") == null
                            || "null".equals(value.getString("id"));
                    final String ID = newOne ? id : value.getString("id");
                    // Nested 1:1 entities share the same id as the parent. This makes
                    // fetches faster: the reference doesn't need to be resolved when
                    // fetching 1:1 nested objects.
                    innerRepo.persist(ID, value, batch, c -> {
                        if (c.succeeded()) {
                            long s = stack.decrementAndGet();
                            if (newOne) {
                                batch.getMap(getStorageKey((String) k)).fastPutAsync(id, ID); // unlike update, create needs to add the reference field to the batch
                            }
                            if (s == 0 && finished.get() && !failed.get()) { //finished iterating and no outstanding processes. 
                                if (redissonBatch == null) {//if it's not inside a nested process.
                                    finishPersist(id, data, batch, resultHandler);
                                } else {//if it is inside a nested process.
                                    resultHandler.handle(Future.succeededFuture(true));
                                }
                            }
                            //else wait for others to complete
                        } else {
                            boolean firstToFail = failed.compareAndSet(false, true);
                            if (firstToFail) {
                                resultHandler.handle(Future.failedFuture(c.cause()));
                            }
                        }
                    });
                }
            });
            batch.getAtomicLongAsync(getCounterKey()).incrementAndGetAsync();
            finished.set(true);
            if (!hasNested.get()) {//does not have nested RedissonEntity within
                if (redissonBatch == null) {//if it's not inside a nested process.
                    finishPersist(id, data, batch, resultHandler);
                } else {//if it is inside a nested process.
                    resultHandler.handle(Future.succeededFuture(true));
                }
            }
        } else {//not a RedissonEntity class, persist as json string.
            //recreate the indexes;
            batch.<String, String>getMap(getStorageKey(), StringCodec.INSTANCE).fastPutAsync(id,
                    Json.encode(data));
            batch.getAtomicLongAsync(getCounterKey()).incrementAndGetAsync();
            if (redissonBatch == null) {//if it's not inside a nested process.
                finishPersist(id, data, batch, resultHandler);
            } else {//if it is inside a nested process.
                resultHandler.handle(Future.succeededFuture(true));
            }
        }
    } catch (InstantiationException | IllegalAccessException | RuntimeException ex) {
        failed.set(true);
        resultHandler.handle(Future.failedFuture(ex));
    }
}

From source file:com.jivesoftware.os.routing.bird.deployable.TenantRoutingBirdProviderBuilder.java
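activeCount gauges concurrent connection-descriptor requests: it is incremented on entry and decremented in the finally block, so the count stays accurate even when a request fails; its current value is reported in the release-group-change warning.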

public ConnectionDescriptorsProvider build(OAuthSigner signer) {
    HttpClientConfig httpClientConfig = HttpClientConfig.newBuilder().build();
    final HttpClient httpClient = new HttpClientFactoryProvider()
            .createHttpClientFactory(Collections.singletonList(httpClientConfig), false)
            .createClient(signer, routesHost, routesPort);

    AtomicLong activeCount = new AtomicLong();
    final ObjectMapper mapper = new ObjectMapper();
    mapper.configure(SerializationFeature.INDENT_OUTPUT, true);
    mapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
    ConnectionDescriptorsProvider connectionsProvider = (connectionsRequest, expectedReleaseGroup) -> {
        activeCount.incrementAndGet();
        try {
            LOG.debug("Requesting connections:{}", connectionsRequest);

            String postEntity;
            try {
                postEntity = mapper.writeValueAsString(connectionsRequest);
            } catch (JsonProcessingException e) {
                LOG.error("Error serializing request parameters object to a string.  Object " + "was "
                        + connectionsRequest + " " + e.getMessage());
                return null;
            }

            HttpResponse response;
            try {
                response = httpClient.postJson(routesPath, postEntity, null);
            } catch (HttpClientException e) {
                LOG.error(
                        "Error posting query request to server.  The entity posted was {} and the endpoint posted to was {}",
                        new Object[] { postEntity, routesPath }, e);
                return null;
            }

            int statusCode = response.getStatusCode();
            if (statusCode >= 200 && statusCode < 300) {
                byte[] responseBody = response.getResponseBody();
                try {
                    ConnectionDescriptorsResponse connectionDescriptorsResponse = mapper.readValue(responseBody,
                            ConnectionDescriptorsResponse.class);
                    if (!connectionsRequest.getRequestUuid()
                            .equals(connectionDescriptorsResponse.getRequestUuid())) {
                        LOG.warn("Request UUIDs are misaligned, request:{} response:{}", connectionsRequest,
                                connectionDescriptorsResponse);
                    }
                    if (connectionDescriptorsResponse.getReturnCode() >= 0 && expectedReleaseGroup != null
                            && !expectedReleaseGroup.equals(connectionDescriptorsResponse.getReleaseGroup())) {
                        String responseEntity = new String(responseBody, StandardCharsets.UTF_8);
                        LOG.warn(
                                "Release group changed, active:{} request:{} requestEntity:{} responseEntity:{} response:{}",
                                activeCount.get(), connectionsRequest, postEntity, responseEntity,
                                connectionDescriptorsResponse);
                    }
                    LOG.debug("Request:{} ConnectionDescriptors:{}", connectionsRequest,
                            connectionDescriptorsResponse);
                    return connectionDescriptorsResponse;
                } catch (IOException x) {
                    LOG.error("Failed to deserialize response:" + new String(responseBody) + " "
                            + x.getMessage());
                    return null;
                }
            }
            return null;
        } finally {
            activeCount.decrementAndGet();
        }
    };
    return connectionsProvider;
}

From source file:org.apache.bookkeeper.metadata.etcd.Etcd64bitIdGeneratorTest.java
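totalIds counts down the ids that remain to be generated across ten worker threads; once decrementAndGet reaches zero the done future completes, and the test asserts that the concurrent set collected at least numIds distinct values, i.e. no duplicates were generated.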

/**
 * Test generating id in parallel and ensure there is no duplicated id.
 */
@Test
public void testGenerateIdParallel() throws Exception {
    final int numThreads = 10;
    @Cleanup("shutdown")
    ExecutorService executor = Executors.newFixedThreadPool(numThreads);

    final int numIds = 10000;
    final AtomicLong totalIds = new AtomicLong(numIds);
    final Set<Long> ids = Collections.newSetFromMap(new ConcurrentHashMap<>());
    final RateLimiter limiter = RateLimiter.create(1000);
    final CompletableFuture<Void> doneFuture = new CompletableFuture<>();
    for (int i = 0; i < numThreads; i++) {
        executor.submit(() -> {
            Client client = Client.builder().endpoints(etcdContainer.getClientEndpoint()).build();
            Etcd64bitIdGenerator gen = new Etcd64bitIdGenerator(client.getKVClient(), scope);

            AtomicBoolean running = new AtomicBoolean(true);

            while (running.get()) {
                limiter.acquire();

                GenericCallbackFuture<Long> genFuture = new GenericCallbackFuture<>();
                gen.generateLedgerId(genFuture);

                genFuture.thenAccept(lid -> {
                    boolean duplicatedFound = !(ids.add(lid));
                    if (duplicatedFound) {
                        running.set(false);
                        doneFuture.completeExceptionally(
                                new IllegalStateException("Duplicated id " + lid + " generated : " + ids));
                        return;
                    } else {
                        if (totalIds.decrementAndGet() <= 0) {
                            running.set(false);
                            doneFuture.complete(null);
                        }
                    }
                }).exceptionally(cause -> {
                    running.set(false);
                    doneFuture.completeExceptionally(cause);
                    return null;
                });
            }
        });
    }

    FutureUtils.result(doneFuture);
    assertTrue(totalIds.get() <= 0);
    assertTrue(ids.size() >= numIds);
}