Example usage for the java.util.concurrent.atomic AtomicLong AtomicLong(long) constructor


Introduction

This page shows example usage of the java.util.concurrent.atomic.AtomicLong(long) constructor, collected from open-source projects.

Prototype

public AtomicLong(long initialValue) 

Document

Creates a new AtomicLong with the given initial value.
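
The constructor takes a single long and makes it the counter's starting value. A minimal, self-contained sketch (class name is illustrative):

import java.util.concurrent.atomic.AtomicLong;

public class AtomicLongConstructorDemo {
    public static void main(String[] args) {
        // Start at 100 instead of the zero you would get from the no-arg constructor.
        AtomicLong counter = new AtomicLong(100L);
        System.out.println(counter.get());             // 100
        System.out.println(counter.incrementAndGet()); // 101
        System.out.println(counter.addAndGet(5));      // 106
    }
}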

Usage

From source file:com.taobao.tddl.common.util.TDDLMBeanServer.java

private String getId(String name, String idPrefix) {
    ConcurrentHashMap<String, AtomicLong> subMap = idMap.get(name);
    if (null == subMap) {
        lock.lock();
        try {
            subMap = idMap.get(name);
            if (null == subMap) {
                subMap = new ConcurrentHashMap<String, AtomicLong>();
                idMap.put(name, subMap);
            }
        } finally {
            lock.unlock();
        }
    }

    AtomicLong indexValue = subMap.get(idPrefix);
    if (null == indexValue) {
        lock.lock();
        try {
            indexValue = subMap.get(idPrefix);
            if (null == indexValue) {
                indexValue = new AtomicLong(0);
                subMap.put(idPrefix, indexValue);
            }
        } finally {
            lock.unlock();
        }
    }
    long value = indexValue.incrementAndGet();
    String result = idPrefix + "-" + value;
    return result;
}
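
The example above uses double-checked locking to create each counter at most once. On Java 8+ the same per-prefix counter can be had without the explicit lock via ConcurrentHashMap.computeIfAbsent; a sketch reusing the names from the example:

private final ConcurrentHashMap<String, ConcurrentHashMap<String, AtomicLong>> idMap = new ConcurrentHashMap<>();

private String getId(String name, String idPrefix) {
    // computeIfAbsent creates the nested map and the counter atomically on first use.
    AtomicLong indexValue = idMap
            .computeIfAbsent(name, k -> new ConcurrentHashMap<>())
            .computeIfAbsent(idPrefix, k -> new AtomicLong(0));
    return idPrefix + "-" + indexValue.incrementAndGet();
}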

From source file:com.linkedin.pinot.broker.requesthandler.BrokerRequestHandler.java

public BrokerRequestHandler(RoutingTable table, TimeBoundaryService timeBoundaryService,
        ScatterGather scatterGatherer, ReduceServiceRegistry reduceServiceRegistry, BrokerMetrics brokerMetrics,
        Configuration config) {
    _routingTable = table;
    _timeBoundaryService = timeBoundaryService;
    _reduceServiceRegistry = reduceServiceRegistry;
    _scatterGatherer = scatterGatherer;
    _replicaSelection = new RoundRobinReplicaSelection();
    _brokerMetrics = brokerMetrics;
    _optimizer = new BrokerRequestOptimizer();
    _requestIdGenerator = new AtomicLong(0);
    _queryResponseLimit = config.getInt(BROKER_QUERY_RESPONSE_LIMIT_CONFIG,
            DEFAULT_BROKER_QUERY_RESPONSE_LIMIT);
    _brokerTimeOutMs = config.getLong(BROKER_TIME_OUT_CONFIG, DEFAULT_BROKER_TIME_OUT_MS);
    _brokerId = config.getString(BROKER_ID_CONFIG_KEY, DEFAULT_BROKER_ID);
    LOGGER.info("Broker response limit is: " + _queryResponseLimit);
    LOGGER.info("Broker timeout is - " + _brokerTimeOutMs + " ms");
    LOGGER.info("Broker id: " + _brokerId);
}
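
Here _requestIdGenerator is the classic unique-ID idiom: each request takes the next value atomically, so concurrent callers never observe the same id. The pattern in isolation (names are illustrative):

private final AtomicLong requestIdGenerator = new AtomicLong(0);

public long nextRequestId() {
    // Atomic read-modify-write; no two callers can receive the same value.
    return requestIdGenerator.incrementAndGet();
}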

From source file:info.archinnov.achilles.it.TestCRUDSimpleEntity.java

@Test
public void should_insert_if_not_exists() throws Exception {
    //Given
    final long id = 100L;
    final Date date = buildDateKey();
    scriptExecutor.executeScriptTemplate("SimpleEntity/insert_single_row.cql",
            ImmutableMap.of("id", id, "table", "simple"));

    final SimpleEntity entity = new SimpleEntity(id, date, "value");
    final AtomicBoolean error = new AtomicBoolean(false);
    final AtomicLong currentId = new AtomicLong(0L);

    final LWTResultListener lwtListener = new LWTResultListener() {

        @Override
        public void onSuccess() {

        }

        @Override
        public void onError(LWTResult lwtResult) {
            error.getAndSet(true);
            currentId.getAndSet(lwtResult.currentValues().getTyped("id"));
        }
    };

    //When
    manager.crud().insert(entity).ifNotExists().withLwtResultListener(lwtListener).execute();

    //Then
    assertThat(error.get()).isTrue();
    assertThat(currentId.get()).isEqualTo(id);
}
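
In this test the AtomicLong is not a counter at all: it is a thread-safe mutable holder that lets the anonymous listener pass a value back to the enclosing test, since local variables captured by inner classes must be (effectively) final. The capture pattern reduced to its core, with a Runnable standing in for the listener:

final AtomicLong captured = new AtomicLong(0L);
Runnable callback = () -> captured.getAndSet(42L); // reference is final, content is mutable
callback.run();
assert captured.get() == 42L;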

From source file:nl.rivm.cib.episim.model.disease.infection.MSEIRSTest.java

private <T> Observable<Entry<T, Stream<BigDecimal>>> averages(
        final Supplier<Observable<Entry<Double, long[]>>> sir, final Function<Double, T> bins, final int n) {
    return Observable.create(sub -> {
        final NavigableMap<T, long[]> sums = java.util.Collections
                .synchronizedNavigableMap(new TreeMap<T, long[]>());
        final long t0 = System.currentTimeMillis();
        final AtomicInteger iteration = new AtomicInteger();
        final AtomicLong sysTime = new AtomicLong(t0);
        Observable.range(0, n).flatMap(i -> Observable.just(i).subscribeOn(Schedulers.computation()).map(ii -> {
            final int iii = iteration.incrementAndGet();
            final long t = System.currentTimeMillis();
            sysTime.updateAndGet(t1 -> {
                if (t - t1 > 10000) {
                    LOG.trace("Progress {}% at ~{}/s, iteration {} of {}",
                            DecimalUtil.floor(DecimalUtil.divide(iii * 100, n)),
                            DecimalUtil.round(DecimalUtil.divide(iii * 1000, t - t0)), iii, n);
                    return t;
                }
                return t1;
            });
            return sir.get()
                    // group by bin size
                    .groupBy(yt -> bins.apply(yt.getKey()))
                    // take highest floating point t in this bin
                    .flatMap(gr -> gr.reduce((yt1, yt2) -> yt1.getKey().compareTo(yt2.getKey()) > 0 ? yt1 : yt2)
                            .toObservable().map(yt -> Collections.entry(gr.getKey(), yt.getValue())))
                    // add to current sums
                    .collect(() -> sums,
                            (sum, yt) -> sum.compute(yt.getKey(),
                                    (k, v) -> v == null ? yt.getValue()
                                            : IntStream.range(0, v.length)
                                                    .mapToLong(iv -> v[iv] + yt.getValue()[iv]).toArray()))
                    .blockingGet();
        })).blockingSubscribe();

        sums.forEach((k, v) -> sub
                .onNext(Collections.entry(k, Arrays.stream(v).mapToObj(y -> DecimalUtil.divide(y, n)))));
        final long dt = System.currentTimeMillis() - t0;
        LOG.trace("Completed {} iterations in {}s = {}/s", n,
                DecimalUtil.toScale(DecimalUtil.divide(dt, 1000), 1),
                DecimalUtil.round(DecimalUtil.divide(n * 1000, dt)));
    });
}
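
The sysTime.updateAndGet call above acts as a lock-free rate limiter: the lambda only advances the stored timestamp (and logs) when at least ten seconds have passed, and the CAS loop inside updateAndGet resolves races between worker threads. The same throttling idiom as a stand-alone sketch (getAndUpdate is used here so the caller sees the previous timestamp):

final AtomicLong lastLogMs = new AtomicLong(System.currentTimeMillis());

void maybeLogProgress(String message) {
    final long now = System.currentTimeMillis();
    final long prev = lastLogMs.getAndUpdate(t -> now - t > 10_000 ? now : t);
    if (now - prev > 10_000) {
        System.out.println(message); // at most roughly one line per 10 s across all threads
    }
}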

From source file:io.druid.server.namespace.cache.NamespaceExtractionCacheManagerExecutorsTest.java

@Test(timeout = 50_000)
public void testRepeatSubmission() throws ExecutionException, InterruptedException {
    final int repeatCount = 5;
    final long delay = 5;
    final AtomicLong ranCount = new AtomicLong(0L);
    final long totalRunCount;
    final long start;
    final CountDownLatch latch = new CountDownLatch(repeatCount);
    try {
        final URIExtractionNamespace namespace = new URIExtractionNamespace("ns", tmpFile.toURI(),
                new URIExtractionNamespace.ObjectMapperFlatDataParser(
                        URIExtractionNamespaceTest.registerTypes(new ObjectMapper())),
                new Period(delay), null);

        start = System.currentTimeMillis();
        final String cacheId = UUID.randomUUID().toString();
        ListenableFuture<?> future = manager.schedule(namespace, factory, new Runnable() {
            @Override
            public void run() {
                try {
                    manager.getPostRunnable(namespace, factory, cacheId).run();
                    ranCount.incrementAndGet();
                } finally {
                    latch.countDown();
                }
            }
        }, cacheId);
        latch.await();
        long minEnd = start + ((repeatCount - 1) * delay);
        long end = System.currentTimeMillis();
        Assert.assertTrue(String.format("Didn't wait long enough between runs. Expected more than %d was %d",
                minEnd - start, end - start), minEnd < end);
    } finally {
        lifecycle.stop();
    }
    totalRunCount = ranCount.get();
    Thread.sleep(50);
    Assert.assertEquals(totalRunCount, ranCount.get(), 1);
}
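
ranCount plays the usual role of a thread-safe test counter: worker threads call incrementAndGet and the main thread reads the total once a latch confirms they have all run. The pairing in miniature (a sketch, inside a method that may throw InterruptedException):

final int workers = 5;
final AtomicLong ranCount = new AtomicLong(0L);
final CountDownLatch latch = new CountDownLatch(workers);
final ExecutorService pool = Executors.newFixedThreadPool(workers);
for (int i = 0; i < workers; i++) {
    pool.submit(() -> {
        try {
            ranCount.incrementAndGet(); // the work being counted
        } finally {
            latch.countDown();
        }
    });
}
latch.await();
assert ranCount.get() == workers;
pool.shutdown();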

From source file:com.linkedin.pinot.requestHandler.BrokerRequestHandler.java

public BrokerRequestHandler(RoutingTable table, TimeBoundaryService timeBoundaryService,
        ScatterGather scatterGatherer, ReduceServiceRegistry reduceServiceRegistry, BrokerMetrics brokerMetrics,
        Configuration config) {
    _routingTable = table;
    _timeBoundaryService = timeBoundaryService;
    _reduceServiceRegistry = reduceServiceRegistry;
    _scatterGatherer = scatterGatherer;
    _replicaSelection = new RoundRobinReplicaSelection();
    _brokerMetrics = brokerMetrics;
    _config = config;
    _optimizer = new BrokerRequestOptimizer();
    _requestIdGenerator = new AtomicLong(0);
    _queryResponseLimit = _config.getInt(BROKER_QUERY_RESPONSE_LIMIT_CONFIG,
            DEFAULT_BROKER_QUERY_RESPONSE_LIMIT);
    _brokerTimeOutMs = _config.getLong(BROKER_TIME_OUT_CONFIG, DEFAULT_BROKER_TIME_OUT_MS);
    _brokerId = _config.getString(BROKER_ID_CONFIG_KEY, DEFAULT_BROKER_ID);
    LOGGER.info("Broker response limit is: " + _queryResponseLimit);
    LOGGER.info("Broker timeout is - " + _brokerTimeOutMs + " ms");
    LOGGER.info("Broker id: " + _brokerId);
}

From source file:org.apache.bookkeeper.common.util.TestBackoff.java

@Test
public void testDecorrelatedJittered() throws Exception {
    long startMs = ThreadLocalRandom.current().nextLong(1L, 1000L);
    long maxMs = ThreadLocalRandom.current().nextLong(startMs, startMs * 2);
    Stream<Long> backoffs = Backoff.decorrelatedJittered(startMs, maxMs).limit(10);
    Iterator<Long> backoffIter = backoffs.iterator();
    assertTrue(backoffIter.hasNext());
    assertEquals(startMs, backoffIter.next().longValue());
    AtomicLong prevMs = new AtomicLong(startMs);
    backoffIter.forEachRemaining(backoffMs -> {
        assertTrue(backoffMs >= startMs);
        assertTrue(backoffMs <= prevMs.get() * 3);
        assertTrue(backoffMs <= maxMs);
        prevMs.set(backoffMs);
    });
}
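
The AtomicLong here merely threads the previous backoff value through the forEachRemaining lambda. For reference, a sequence satisfying the bounds the test asserts (each value in [startMs, min(3 * previous, maxMs)]) could be generated as follows; this is a sketch of the recurrence, not Bookkeeper's actual Backoff implementation:

final AtomicLong prev = new AtomicLong(startMs);
Stream<Long> backoffs = Stream.generate(() -> prev.updateAndGet(p ->
        // next backoff is uniform over [startMs, 3 * previous], capped at maxMs
        Math.min(maxMs, ThreadLocalRandom.current().nextLong(startMs, p * 3 + 1))));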

From source file:org.apache.phoenix.execute.HashJoinPlan.java

@Override
public ResultIterator iterator(ParallelScanGrouper scanGrouper) throws SQLException {
    int count = subPlans.length;
    PhoenixConnection connection = getContext().getConnection();
    ConnectionQueryServices services = connection.getQueryServices();
    ExecutorService executor = services.getExecutor();
    List<Future<Object>> futures = Lists.<Future<Object>>newArrayListWithExpectedSize(count);
    dependencies = Lists.newArrayList();
    if (joinInfo != null) {
        hashClient = hashClient != null ? hashClient
                : new HashCacheClient(delegate.getContext().getConnection());
        firstJobEndTime = new AtomicLong(0);
        keyRangeExpressions = new CopyOnWriteArrayList<Expression>();
    }

    for (int i = 0; i < count; i++) {
        final int index = i;
        futures.add(executor.submit(new JobCallable<Object>() {

            @Override
            public Object call() throws Exception {
                return subPlans[index].execute(HashJoinPlan.this);
            }

            @Override
            public Object getJobId() {
                return HashJoinPlan.this;
            }

            @Override
            public TaskExecutionMetricsHolder getTaskExecutionMetric() {
                return NO_OP_INSTANCE;
            }
        }));
    }

    SQLException firstException = null;
    for (int i = 0; i < count; i++) {
        try {
            Object result = futures.get(i).get();
            subPlans[i].postProcess(result, this);
        } catch (InterruptedException e) {
            if (firstException == null) {
                firstException = new SQLException("Sub plan [" + i + "] execution interrupted.", e);
            }
        } catch (ExecutionException e) {
            if (firstException == null) {
                firstException = new SQLException("Encountered exception in sub plan [" + i + "] execution.",
                        e.getCause());
            }
        }
    }
    if (firstException != null) {
        SQLCloseables.closeAllQuietly(dependencies);
        throw firstException;
    }

    Expression postFilter = null;
    boolean hasKeyRangeExpressions = keyRangeExpressions != null && !keyRangeExpressions.isEmpty();
    if (recompileWhereClause || hasKeyRangeExpressions) {
        StatementContext context = delegate.getContext();
        PTable table = context.getCurrentTable().getTable();
        ParseNode viewWhere = table.getViewStatement() == null ? null
                : new SQLParser(table.getViewStatement()).parseQuery().getWhere();
        context.setResolver(FromCompiler.getResolverForQuery((SelectStatement) (delegate.getStatement()),
                delegate.getContext().getConnection()));
        if (recompileWhereClause) {
            postFilter = WhereCompiler.compile(delegate.getContext(), delegate.getStatement(), viewWhere, null);
        }
        if (hasKeyRangeExpressions) {
            WhereCompiler.compile(delegate.getContext(), delegate.getStatement(), viewWhere,
                    keyRangeExpressions, true, null);
        }
    }

    if (joinInfo != null) {
        Scan scan = delegate.getContext().getScan();
        HashJoinInfo.serializeHashJoinIntoScan(scan, joinInfo);
    }

    ResultIterator iterator = joinInfo == null ? delegate.iterator(scanGrouper)
            : ((BaseQueryPlan) delegate).iterator(dependencies, scanGrouper);
    if (statement.getInnerSelectStatement() != null && postFilter != null) {
        iterator = new FilterResultIterator(iterator, postFilter);
    }

    return iterator;
}
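
firstJobEndTime is initialized to 0, which reads like a sentinel: a field set up this way is typically claimed exactly once by the first sub-plan to finish, using compareAndSet. That first-completion pattern as a sketch (an assumption about usage; the excerpt does not show where Phoenix writes the field):

final AtomicLong firstJobEndTime = new AtomicLong(0);

void onSubPlanFinished() {
    // Only the first finisher succeeds; later calls see a non-zero value and the CAS fails.
    firstJobEndTime.compareAndSet(0, System.currentTimeMillis());
}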

From source file:com.amazonaws.services.dynamodbv2.replication.impl.ShardSubscriberImpl.java

/**
 * Constructs a Subscriber that creates Checkpoints based on the provided factory.
 *
 * @param tableName
 *            The table name
 * @param multiRegionCheckpointFactory
 *            The factory for producing {@link MultiRegionCheckpoint}s
 * @param replicationWorker
 *            The {@link RegionReplicationWorker} that manages this subscriber
 * @param timeBetweenSweeps
 *            The time in milliseconds between scans for successfully replicated updates.
 * @param checkpointBackoffTime
 *            The backoff time in milliseconds before retrying a checkpoint.
 */
public ShardSubscriberImpl(final String tableName,
        final MultiRegionCheckpointFactory multiRegionCheckpointFactory,
        final RegionReplicationWorker replicationWorker, final long timeBetweenSweeps,
        final long checkpointBackoffTime) {
    this.tableName = tableName;
    this.multiRegionCheckpointFactory = multiRegionCheckpointFactory;
    this.replicationWorker = replicationWorker;
    this.timeBetweenSweeps = timeBetweenSweeps;
    this.checkpointBackoffTime = checkpointBackoffTime;
    checkpointer = null;

    checkpoints = new ConcurrentSkipListMap<String, MultiRegionCheckpoint>(new SequenceNumberComparator());
    sweeper = null;
    cloudWatchClient = replicationWorker.getReplicationConfiguration()
            .getCloudWatchClient(replicationWorker.getRegionName(), tableName);
    userWriteCount = new AtomicLong(0);
}