Example usage for java.util.concurrent.atomic AtomicLong get

Introduction

On this page you can find example usages of java.util.concurrent.atomic.AtomicLong.get().

Prototype

public final long get() 

Document

Returns the current value, with memory effects as specified by VarHandle#getVolatile.
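
A minimal, self-contained sketch of how get() reads the current value (the class and variable names here are illustrative, not taken from the usage examples below):

import java.util.concurrent.atomic.AtomicLong;

public class AtomicLongGetSketch {
    public static void main(String[] args) {
        // An illustrative counter; get() performs a volatile read of the current value.
        AtomicLong requestCount = new AtomicLong(0);
        requestCount.incrementAndGet();     // atomically add 1
        requestCount.addAndGet(5);          // atomically add 5
        long snapshot = requestCount.get(); // read the current value without modifying it
        System.out.println("current count = " + snapshot); // prints 6
    }
}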

Usage

From source file:org.lendingclub.mercator.docker.SwarmScanner.java

long saveDockerNode(String swarmClusterId, JsonNode n) {

    String swarmNodeId = n.get("swarmNodeId").asText();
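    // updateTs is a mutable holder the merge lambda below writes to; it keeps the smallest updateTs seen, and get() supplies the return value.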
    AtomicLong updateTs = new AtomicLong(Long.MAX_VALUE);
    dockerScanner.getNeoRxClient().execCypher(
            "merge (n:DockerHost {swarmNodeId:{nodeId}}) set n+={props}, n.updateTs=timestamp() return n",
            "nodeId", swarmNodeId, "props", n).forEach(actual -> {
                removeDockerLabels("DockerHost", "swarmNodeId", swarmNodeId, n, actual);
                updateTs.set(Math.min(updateTs.get(), actual.path("updateTs").asLong(Long.MAX_VALUE)));
            });

    logger.info("connecting swarm={} to node={}", swarmClusterId, swarmNodeId);
    dockerScanner.getNeoRxClient().execCypher(
            "match (s:DockerSwarm {swarmClusterId:{swarmClusterId}}), (n:DockerHost {swarmNodeId:{nodeId}}) merge (s)-[x:CONTAINS]->(n) set x.updateTs=timestamp()",
            "swarmClusterId", swarmClusterId, "nodeId", swarmNodeId);
    return updateTs.get();

}

From source file:org.apache.activemq.artemis.tests.integration.persistence.metrics.JournalPendingMessageTest.java

@Test
public void testQueueMessageSizePersistentAndNonPersistent() throws Exception {

    AtomicLong publishedNonPersistentMessageSize = new AtomicLong();
    AtomicLong publishedMessageSize = new AtomicLong();
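    // The publish helpers below accumulate message sizes into these counters; get() reads the totals used in the assertions.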

    publishTestQueueMessages(100, DeliveryMode.PERSISTENT, publishedMessageSize);
    publishTestQueueMessages(100, DeliveryMode.NON_PERSISTENT, publishedNonPersistentMessageSize);
    verifyPendingStats(defaultQueueName, 200,
            publishedMessageSize.get() + publishedNonPersistentMessageSize.get());
    verifyPendingDurableStats(defaultQueueName, 100, publishedMessageSize.get());
}

From source file:org.apache.bookkeeper.bookie.InterleavedLedgerStorageTest.java

@Test
public void testShellCommands() throws Exception {
    interleavedStorage.flush();
    interleavedStorage.shutdown();
    final Pattern entryPattern = Pattern
            .compile("entry (?<entry>\\d+)\t:\t((?<na>N/A)|\\(log:(?<logid>\\d+), pos: (?<pos>\\d+)\\))");

    class Metadata {
        final Pattern keyPattern = Pattern.compile("master key +: ([0-9a-f])");
        final Pattern sizePattern = Pattern.compile("size +: (\\d+)");
        final Pattern entriesPattern = Pattern.compile("entries +: (\\d+)");
        final Pattern isFencedPattern = Pattern.compile("isFenced +: (\\w+)");

        public String masterKey;
        public long size = -1;
        public long entries = -1;
        public boolean foundFenced = false;

        void check(String s) {
            Matcher keyMatcher = keyPattern.matcher(s);
            if (keyMatcher.matches()) {
                masterKey = keyMatcher.group(1);
                return;
            }

            Matcher sizeMatcher = sizePattern.matcher(s);
            if (sizeMatcher.matches()) {
                size = Long.valueOf(sizeMatcher.group(1));
                return;
            }

            Matcher entriesMatcher = entriesPattern.matcher(s);
            if (entriesMatcher.matches()) {
                entries = Long.valueOf(entriesMatcher.group(1));
                return;
            }

            Matcher isFencedMatcher = isFencedPattern.matcher(s);
            if (isFencedMatcher.matches()) {
                Assert.assertEquals("true", isFencedMatcher.group(1));
                foundFenced = true;
                return;
            }
        }

        void validate(long foundEntries) {
            Assert.assertTrue(entries >= numWrites * entriesPerWrite);
            Assert.assertEquals(entries, foundEntries);
            Assert.assertTrue(foundFenced);
            Assert.assertNotEquals(-1, size);
        }
    }
    final Metadata foundMetadata = new Metadata();

    AtomicLong curEntry = new AtomicLong(0);
    AtomicLong someEntryLogger = new AtomicLong(-1);
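    // curEntry tracks the next expected entry id while the shell output is parsed; someEntryLogger captures the first entry log id seen so it can be removed later.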
    BookieShell shell = new BookieShell(LedgerIdFormatter.LONG_LEDGERID_FORMATTER,
            EntryFormatter.STRING_FORMATTER) {
        @Override
        void printInfoLine(String s) {
            Matcher matcher = entryPattern.matcher(s);
            System.out.println(s);
            if (matcher.matches()) {
                assertEquals(Long.toString(curEntry.get()), matcher.group("entry"));

                if (matcher.group("na") == null) {
                    String logId = matcher.group("logid");
                    Assert.assertNotEquals(matcher.group("logid"), null);
                    Assert.assertNotEquals(matcher.group("pos"), null);
                    Assert.assertTrue((curEntry.get() % entriesPerWrite) == 0);
                    Assert.assertTrue(curEntry.get() <= numWrites * entriesPerWrite);
                    if (someEntryLogger.get() == -1) {
                        someEntryLogger.set(Long.valueOf(logId));
                    }
                } else {
                    Assert.assertEquals(matcher.group("logid"), null);
                    Assert.assertEquals(matcher.group("pos"), null);
                    Assert.assertTrue(((curEntry.get() % entriesPerWrite) != 0)
                            || ((curEntry.get() >= (entriesPerWrite * numWrites))));
                }
                curEntry.incrementAndGet();
            } else {
                foundMetadata.check(s);
            }
        }
    };
    shell.setConf(conf);
    int res = shell.run(new String[] { "ledger", "-m", "0" });
    Assert.assertEquals(0, res);
    Assert.assertTrue(curEntry.get() >= numWrites * entriesPerWrite);
    foundMetadata.validate(curEntry.get());

    // Should pass consistency checker
    res = shell.run(new String[] { "localconsistencycheck" });
    Assert.assertEquals(0, res);

    // Remove a logger
    EntryLogger entryLogger = new EntryLogger(conf);
    entryLogger.removeEntryLog(someEntryLogger.get());

    // Should fail consistency checker
    res = shell.run(new String[] { "localconsistencycheck" });
    Assert.assertEquals(1, res);
}

From source file:org.apache.hadoop.ipc.DecayRpcScheduler.java

/**
 * Update the scheduleCache to match current conditions in callCounts.
 */
private void recomputeScheduleCache() {
    Map<Object, Integer> nextCache = new HashMap<Object, Integer>();

    for (Map.Entry<Object, List<AtomicLong>> entry : callCounts.entrySet()) {
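        // Take a point-in-time snapshot of each caller's call count and map it to a priority level.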
        Object id = entry.getKey();
        AtomicLong value = entry.getValue().get(0);

        long snapshot = value.get();
        int computedLevel = computePriorityLevel(snapshot);

        nextCache.put(id, computedLevel);
    }

    // Swap in to activate
    scheduleCacheRef.set(Collections.unmodifiableMap(nextCache));
}

From source file:org.polymap.p4.data.importer.ImportPanel.java

@Override
public void uploadStarted(ClientFile clientFile, InputStream in) throws Exception {
    log.info(clientFile.getName() + " - " + clientFile.getType() + " - " + clientFile.getSize());

    uploadProgress(resultSection.getBody(), "Uploading ...");

    // upload file
    assert clientFile.getName() != null : "Null client file name is not supported yet.";
    File f = new File(tempDir, clientFile.getName());
    try (OutputStream out = new FileOutputStream(f)) {
        Timer timer = new Timer();
        byte[] buf = new byte[4096];
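        // count tracks how many bytes have been uploaded so far; get() feeds the progress message below.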
        AtomicLong count = new AtomicLong();
        for (int c = in.read(buf); c > -1; c = in.read(buf)) {
            out.write(buf, 0, c);
            count.addAndGet(c);

            if (timer.elapsedTime() > 2000) {
                Composite parent = resultSection.getBody();
                if (parent.isDisposed()) {
                    break; // stop uploading
                } else {
                    uploadProgress(resultSection.getBody(),
                            "Uploading ..." + byteCountToDisplaySize(count.get()));
                    timer.start();
                }
            }
        }
        uploadProgress(resultSection.getBody(), "Upload ...complete.");
    } catch (Exception e) {
        uploadProgress(resultSection.getBody(), "Upload ...failed.");
        async(() -> site().toolkit().createSnackbar(Appearance.FadeIn, "Unable to upload file."));
        return;
    }

    async(() -> {
        // fires event which triggers UI update in ImportsContentProvider
        context.addContextOut(f);
    });
}

From source file:org.sonar.server.benchmark.SourceDbBenchmarkTest.java

private void scrollRows() throws SQLException {
    LOGGER.info("Scroll table FILE_SOURCES");
    DbClient dbClient = new DbClient(dbTester.database(), dbTester.myBatis());
    Connection connection = dbTester.openConnection();
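    // counter is incremented once per scrolled row and shared with the progress task; get() is read to compute and log throughput.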
    AtomicLong counter = new AtomicLong();
    ProgressTask progress = new ProgressTask(LOGGER, "source file", counter);
    Timer timer = new Timer("SourceDbScroll");
    timer.schedule(progress, ProgressTask.PERIOD_MS, ProgressTask.PERIOD_MS);

    try {
        long start = System.currentTimeMillis();
        SourceLineResultSetIterator it = SourceLineResultSetIterator.create(dbClient, connection, 0L);
        while (it.hasNext()) {
            SourceLineResultSetIterator.SourceFile row = it.next();
            assertThat(row.getLines().size()).isEqualTo(3220);
            assertThat(row.getFileUuid()).isNotEmpty();
            counter.incrementAndGet();
        }
        long end = System.currentTimeMillis();
        long period = end - start;
        long throughputPerSecond = 1000L * counter.get() / period;
        LOGGER.info(String.format("%d FILE_SOURCES rows scrolled in %d ms (%d rows/second)", counter.get(),
                period, throughputPerSecond));

    } finally {
        DbUtils.closeQuietly(connection);
        timer.cancel();
    }
}

From source file:com.antsdb.saltedfish.nosql.Gobbler.java

/**
 * Returns -1 if no valid sp is found, meaning this is an empty database.
 */
public long getLatestSp() {
    long sp = this.spaceman.getAllocationPointer();
    int spaceId = SpaceManager.getSpaceId(sp);
    long spaceStartSp = this.spaceman.getSpaceStartSp(spaceId);
    if (spaceStartSp == sp) {
        // if current space is empty, wait a little
        try {
            Thread.sleep(1000);
        } catch (InterruptedException e) {
        }
    }
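    // result holds the space pointer of the last replayed log entry; get() returns -1 if nothing was replayed.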
    AtomicLong result = new AtomicLong(-1);
    try {
        this.replay(spaceStartSp, true, new ReplayHandler() {
            @Override
            public void all(LogEntry entry) {
                result.set(entry.getSpacePointer());
            }
        });
    } catch (Exception ignored) {
    }
    return result.get();
}

From source file:jduagui.Controller.java

public static long getSize(String startPath, Map<String, Long> dirs, Map<String, Long> files)
        throws IOException {
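    // size, subdirs and fs are atomic counters the anonymous FileVisitor below mutates; get() reads the final values afterwards.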
    final AtomicLong size = new AtomicLong(0);
    final AtomicLong subdirs = new AtomicLong(0);
    final AtomicLong fs = new AtomicLong(0);
    final File f = new File(startPath);
    final String str = "";
    Path path = Paths.get(startPath);

    Files.walkFileTree(path, new SimpleFileVisitor<Path>() {
        @Override
        public FileVisitResult preVisitDirectory(Path dir, BasicFileAttributes attrs) {
            subdirs.incrementAndGet();
            return FileVisitResult.CONTINUE;
        }

        @Override
        public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
            fs.incrementAndGet();
            size.addAndGet(attrs.size());
            return FileVisitResult.CONTINUE;
        }

        @Override
        public FileVisitResult visitFileFailed(Path file, IOException exc) throws IOException {
            fs.incrementAndGet();
            return FileVisitResult.CONTINUE;
        }
    });
    if (subdirs.decrementAndGet() == -1)
        subdirs.incrementAndGet();

    if (f.isDirectory()) {
        dirs.put(startPath, subdirs.get());
        files.put(startPath, fs.get());
    }
    return size.get();
}

From source file:org.codice.solr.factory.impl.SolrClientAdapter.java

@VisibleForTesting
boolean wasNotRecent(AtomicLong previous, long freq) {
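    // "previous" holds the timestamp of the last occurrence; it is advanced to "now" atomically only when at least "freq" ms have elapsed.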
    final long now = System.currentTimeMillis();

    if (now == previous.get()) {
        return false;
    }
    // update if not recent (i.e. if the last occurrence was older than the specified frequency)
    return previous.accumulateAndGet(now, (last, n) -> ((now - last) >= freq) ? now : last) == now;
}

From source file:org.apache.activemq.artemis.tests.integration.persistence.metrics.JournalPendingMessageTest.java

@Test
public void testMessageSizeOneDurablePartialConsumption() throws Exception {
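    // publishedMessageSize is shared with the publish and consume helpers; get() supplies the expected sizes to the pending-stat checks.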
    AtomicLong publishedMessageSize = new AtomicLong();

    Connection connection = cf.createConnection();
    connection.setClientID("clientId");
    connection.start();

    publishTestMessagesDurable(connection, new String[] { "sub1" }, 200, publishedMessageSize,
            DeliveryMode.PERSISTENT, false);

    verifyPendingStats(defaultTopicName, 200, publishedMessageSize.get());
    verifyPendingDurableStats(defaultTopicName, 200, publishedMessageSize.get());

    // consume partial messages
    consumeDurableTestMessages(connection, "sub1", 50, publishedMessageSize);

    // 150 should be left
    verifyPendingStats(defaultTopicName, 150, publishedMessageSize.get());
    // We don't know the exact remaining size, but it should be smaller than before,
    // so estimate it as 75% of the published size (150 of the 200 messages remain)
    verifyPendingDurableStats(defaultTopicName, 150, (long) (.75 * publishedMessageSize.get()));

    connection.close();
}