Example usage for java.util.concurrent.atomic.AtomicLong#AtomicLong(long)

Introduction

On this page you can find example usages of the java.util.concurrent.atomic.AtomicLong#AtomicLong(long) constructor.

Prototype

public AtomicLong(long initialValue) 

Document

Creates a new AtomicLong with the given initial value.
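
A minimal, self-contained sketch of the constructor in isolation (the class name and values here are illustrative, not taken from the examples below):

import java.util.concurrent.atomic.AtomicLong;

public class AtomicLongDemo {
    public static void main(String[] args) {
        // Start the counter at 100 instead of the default 0.
        AtomicLong counter = new AtomicLong(100L);

        long next = counter.incrementAndGet(); // atomically bumps to 101
        counter.addAndGet(9L);                 // atomically adds, value is now 110

        System.out.println(next);          // 101
        System.out.println(counter.get()); // 110
    }
}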

Usage

From source file:com.github.jackygurui.vertxredissonrepository.repository.SaveAndGetCustomerConcurrentWaterfallTest.java

@Test
public void test4SaveAndGetCustomerConcurrentWaterfall(TestContext context) throws Exception {
    Async async = context.async();
    JsonNode source = JsonLoader.fromResource("/Customer.json");
    int records = 10000;
    HanyuPinyin.convert(""); // warm up
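    // Shared counter, starting at 0, incremented by the parallel stream's worker threads as operations complete.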
    AtomicLong counter = new AtomicLong(0);
    StopWatch sw = new StopWatch();
    sw.start();
    IntStream.rangeClosed(1, records).parallel().forEach(e -> {
        JsonObject clone = new JsonObject(Json.encode(source));
        clone.getJsonObject("personalDetails").put("phoneNumber",
                ((Long.parseLong(clone.getJsonObject("personalDetails").getString("phoneNumber")) + 5000 + e)
                        + ""));
        org.simondean.vertx.async.Async.waterfall().<String>task(t -> {
            customerRepository.create(Json.encode(clone), t);
        }).<Customer>task((id, t) -> {
            customerRepository.get(id, t);
        }).run(rr -> {
            long ct = counter.incrementAndGet();
            //                logger.info("Counter = " + ct + " | success = " + !r.failed());
            if (rr.succeeded()) {
                try {
                    Customer loaded = rr.result();
                    Customer c = Json.decodeValue(clone.encode(), Customer.class);
                    c.setId(loaded.getId());
                    c.getAddressDetails().setId(loaded.getId());
                    c.getPersonalDetails().setId(loaded.getId());
                    String encoded = Json.encode(c);
                    String resultEncoded = Json.encode(loaded);
                    if (!resultEncoded.equals(encoded)) {
                        logger.info(loaded.getId() + " - SOURCE : " + encoded);
                        logger.info(loaded.getId() + " - RESULT : " + resultEncoded);
                    }
                    context.assertEquals(resultEncoded, encoded);
                } catch (Exception ex) {
                    context.fail(ex);
                    async.complete();
                }
            } else {
                context.fail(rr.cause());
                async.complete();
            }
            if (ct == records) {
                sw.stop();
                logger.info("time to concurrently save and get using waterfall " + records
                        + " customer records: " + sw.getTime());
                async.complete();
            }
        });
    });
}

From source file:org.apache.flink.monitor.trackers.regression.OLSRegressionTest.java

@Test
public void shouldFitHistogram() throws IOException {
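    // Exact histogram: maps each observed value to its occurrence count (AtomicLong so it can be incremented in place).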
    Map<Long, AtomicLong> exactH = new HashMap<Long, AtomicLong>();

    Tuple3<Long, Long, String> r = new Tuple3<Long, Long, String>();
    int counter = 1;
    while (!this.oneKTuples.reachedEnd()) {
        r = this.oneKTuples.nextRecord(r);
        if (r == null) {
            break;
        }
        if (r.getField(1) != null && (Long) r.getField(1) > 0) {
            if (exactH.containsKey(r.getField(1))) {
                exactH.get(r.getField(1)).incrementAndGet();
            } else {
                exactH.put((Long) r.getField(1), new AtomicLong(1));
            }
        }
    }

    double[] xVector = new double[exactH.size()];
    double[] yVector = new double[exactH.size()];
    int index = 0;
    for (Entry<Long, AtomicLong> e : exactH.entrySet()) {
        xVector[index] = e.getKey();
        yVector[index++] = e.getValue().doubleValue();
    }
    long before = System.currentTimeMillis();

    OLSTrendLine t = new PolyTrendLine(10);
    t.setValues(yVector, xVector);

    long after = System.currentTimeMillis();
    System.out.println("Took " + (after - before) / 1000 + " seconds to compute fit.");

    File histoFile = new File("./fitted");
    FileWriter writer = new FileWriter(histoFile);

    long valueSum = 0;
    double sumSquaredError = 0.0;
    int i = 0;
    long maxValue = -1;
    for (Long k : exactH.keySet()) {
        double actual = exactH.get(k).doubleValue();
        double prediction = t.predict(k);
        sumSquaredError += (prediction - actual) * (prediction - actual);
        valueSum += exactH.get(k).longValue();
        writer.write("" + k + "\t" + actual + "\n");
        if (k > maxValue) {
            maxValue = k;
        }
        if (i++ % 100000 == 0) {
            writer.flush();
        }
    }

    writer.flush();
    writer.close();

    double[] coef = t.getCoef().getColumn(0);
    for (int j = 0; j < coef.length; j++) {
        System.out.println(coef[j] + " ");
    }
    System.out.println("maxValue " + maxValue);
    System.out.println("sum square errors: " + sumSquaredError + " with acc values: " + valueSum);
}

From source file:alluxio.worker.block.meta.StorageDir.java

private StorageDir(StorageTier tier, int dirIndex, long capacityBytes, String dirPath) {
    mTier = Preconditions.checkNotNull(tier);
    mDirIndex = dirIndex;
    mCapacityBytes = capacityBytes;
    mAvailableBytes = new AtomicLong(capacityBytes);
    mCommittedBytes = new AtomicLong(0);
    mDirPath = dirPath;
    mBlockIdToBlockMap = new HashMap<>(200);
    mBlockIdToTempBlockMap = new HashMap<>(200);
    mSessionIdToTempBlockIdsMap = new HashMap<>(200);
}

From source file:com.taobao.tddl.interact.monitor.TotalStatMonitor.java

/**
 * Increments the access counter for a normal db/table key.
 *
 * @param key the db/table key to count
 */
public static void dbTabIncrement(String key) {
    // Insert the counter already at 1 when the key is absent so the first access is counted;
    // putIfAbsent returns the existing counter, or null if this call inserted the new one.
    AtomicLong incre = dbTabMap.putIfAbsent(key, new AtomicLong(1));
    if (incre != null) {
        incre.incrementAndGet();
    }
}

From source file:org.apache.synapse.commons.throttle.core.CallerContext.java

public CallerContext clone() throws CloneNotSupportedException {
    super.clone();
    CallerContext clone = new CallerContext(this.id) {
        @Override
        public int getType() {
            return CallerContext.this.getType();
        }
    };
    clone.nextAccessTime = this.nextAccessTime;
    clone.firstAccessTime = this.firstAccessTime;
    clone.nextTimeWindow = this.nextTimeWindow;
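    // Copy the counter values into fresh AtomicLongs so the clone does not share counters with the original.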
    clone.globalCount = new AtomicLong(this.globalCount.longValue());
    clone.localCount = new AtomicLong(this.localCount.longValue());

    localCount.set(0);
    return clone;
}

From source file:org.lendingclub.mercator.docker.SwarmScanner.java

long saveDockerNode(String swarmClusterId, JsonNode n) {

    String swarmNodeId = n.get("swarmNodeId").asText();
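    // Effectively-final holder for the minimum updateTs observed in the query results below.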
    AtomicLong updateTs = new AtomicLong(Long.MAX_VALUE);
    dockerScanner.getNeoRxClient().execCypher(
            "merge (n:DockerHost {swarmNodeId:{nodeId}}) set n+={props}, n.updateTs=timestamp() return n",
            "nodeId", swarmNodeId, "props", n).forEach(actual -> {
                removeDockerLabels("DockerHost", "swarmNodeId", swarmNodeId, n, actual);
                updateTs.set(Math.min(updateTs.get(), actual.path("updateTs").asLong(Long.MAX_VALUE)));
            });

    logger.info("connecting swarm={} to node={}", swarmClusterId, swarmNodeId);
    dockerScanner.getNeoRxClient().execCypher(
            "match (s:DockerSwarm {swarmClusterId:{swarmClusterId}}), (n:DockerHost {swarmNodeId:{nodeId}}) merge (s)-[x:CONTAINS]->(n) set x.updateTs=timestamp()",
            "swarmClusterId", swarmClusterId, "nodeId", swarmNodeId);
    return updateTs.get();

}

From source file:org.apache.hadoop.hbase.regionserver.Segment.java

protected Segment(Segment segment) {
    this.cellSet = segment.getCellSet();
    this.comparator = segment.getComparator();
    this.minSequenceId = segment.getMinSequenceId();
    this.memStoreLAB = segment.getMemStoreLAB();
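    // Fresh AtomicLong seeded with the source segment's current size, so this segment tracks its size independently.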
    this.size = new AtomicLong(segment.getSize());
    this.tagsPresent = segment.isTagsPresent();
    this.timeRangeTracker = segment.getTimeRangeTracker();
}

From source file:org.apache.spark.network.server.OneForOneStreamManager.java

public OneForOneStreamManager() {
    // For debugging purposes, start with a random stream id to help identify different streams.
    // This does not need to be globally unique, only unique to this class.
    nextStreamId = new AtomicLong((long) new Random().nextInt(Integer.MAX_VALUE) * 1000);
    streams = new ConcurrentHashMap<>();
}

From source file:com.github.brandtg.switchboard.LogPuller.java

@Override
public void run() {
    HttpClient httpClient = HttpClients.createDefault();
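    // Tracks the index of the most recent log region received from the source.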
    AtomicLong currentIndex = new AtomicLong(lastIndex);
    HttpHost host = new HttpHost(sourceAddress.getAddress(), sourceAddress.getPort());
    boolean firstLoop = true;

    while (!isShutdown.get()) {
        // Build URI
        StringBuilder sb = new StringBuilder();
        try {
            if (firstLoop) {
                sb.append("/log/metadata/header?target=").append(sinkAddress.getHostName()).append(":")
                        .append(sinkAddress.getPort());
                firstLoop = false;
            } else {
                sb.append("/log/").append(URLEncoder.encode(collection, ENCODING)).append("/")
                        .append(currentIndex.get()).append("?target=").append(sinkAddress.getHostName())
                        .append(":").append(sinkAddress.getPort());
            }
        } catch (Exception e) {
            throw new RuntimeException(e);
        }

        // TODO: Clean this up a little bit
        int resStatus = -1;
        synchronized (this) {
            HttpEntity entity = null;
            try {
                // Get data
                URI uri = URI.create(sb.toString());
                HttpGet req = new HttpGet(uri);
                HttpResponse res = httpClient.execute(host, req);
                entity = res.getEntity();
                resStatus = res.getStatusLine().getStatusCode();
                if (resStatus == 200) {
                    // Wait for data to be consumed
                    // n.b. This object will be registered as a listener
                    wait();

                    // Update position
                    InputStream inputStream = res.getEntity().getContent();
                    LogRegionResponse metadata = OBJECT_MAPPER.readValue(inputStream, LogRegionResponse.class);
                    currentIndex
                            .set(metadata.getLogRegions().get(metadata.getLogRegions().size() - 1).getIndex());

                    for (LogRegion logRegion : metadata.getLogRegions()) {
                        LOG.info("Received {}", logRegion);
                    }
                }
            } catch (Exception e) {
                LOG.error("Error", e);
            } finally {
                if (entity != null) {
                    try {
                        EntityUtils.consume(entity);
                    } catch (IOException e) {
                        LOG.error("Error", e);
                    }
                }
            }
        }

        // Sleep if we did not get data
        if (resStatus != 200) {
            try {
                LOG.debug("No data available, sleeping 1000 ms");
                Thread.sleep(1000);
            } catch (InterruptedException e) {
                LOG.warn("Error while sleeping for more data", e);
            }
        }
    }
}

From source file:org.apache.shindig.gadgets.http.DefaultInvalidationService.java

@Inject
public DefaultInvalidationService(HttpCache httpCache, CacheProvider cacheProvider) {
    // Initialize to current time to minimize conflict with persistent caches
    this(httpCache, cacheProvider, new AtomicLong(System.currentTimeMillis()));
}