Example usage for java.util.concurrent.atomic AtomicLong get

List of usage examples for java.util.concurrent.atomic AtomicLong get

Introduction

On this page you can find example usage for java.util.concurrent.atomic AtomicLong get.

Prototype

public final long get() 

Document

Returns the current value, with memory effects as specified by VarHandle#getVolatile.
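
To illustrate the volatile read described above, here is a minimal, self-contained sketch; the class and variable names are illustrative and do not come from any of the source files below.

import java.util.concurrent.atomic.AtomicLong;

// Illustrative example only; not taken from the source files below.
public class AtomicLongGetExample {

    public static void main(String[] args) throws InterruptedException {
        AtomicLong counter = new AtomicLong();

        // Each incrementAndGet() performs a volatile read-modify-write.
        Thread writer = new Thread(() -> {
            for (int i = 0; i < 1000; i++) {
                counter.incrementAndGet();
            }
        });
        writer.start();
        writer.join();

        // get() is a volatile read, so the writer's updates are guaranteed visible here.
        System.out.println("count = " + counter.get()); // prints: count = 1000
    }
}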

Usage

From source file:org.lendingclub.mercator.docker.SwarmScanner.java

public void scanTasksForSwarm(String swarmClusterId) {

    logger.info("scanning tasks for swarm: {}", swarmClusterId);

    // Tracks the minimum update timestamp across all saved tasks; used below as a cutoff for deleting stale nodes.
    AtomicLong earlistUpdate = new AtomicLong(Long.MAX_VALUE);
    AtomicBoolean error = new AtomicBoolean(false);
    JsonNode response = getRestClient().getTasks();
    response.forEach(it -> {
        try {
            earlistUpdate.set(Math.min(earlistUpdate.get(), saveTask(it)));

        } catch (Exception e) {
            logger.warn("problem updating task", e);
            error.set(true);
        }
    });

    if (!error.get()) {
        if (earlistUpdate.get() < System.currentTimeMillis()) {
            dockerScanner.getNeoRxClient().execCypher(
                    "match (x:DockerTask) where x.swarmClusterId={swarmClusterId} and x.updateTs<{cutoff} detach delete x",
                    "cutoff", earlistUpdate.get(), "swarmClusterId", swarmClusterId);
        }
    }

}

From source file:org.apache.activemq.artemis.tests.integration.persistence.metrics.JournalPendingMessageTest.java

@Test
public void testQueueMessageSizeAfterConsumption() throws Exception {
    AtomicLong publishedMessageSize = new AtomicLong();

    publishTestQueueMessages(200, publishedMessageSize);
    verifyPendingStats(defaultQueueName, 200, publishedMessageSize.get());
    verifyPendingDurableStats(defaultQueueName, 200, publishedMessageSize.get());

    consumeTestQueueMessages(200);

    verifyPendingStats(defaultQueueName, 0, 0);
    verifyPendingDurableStats(defaultQueueName, 0, 0);
}

From source file:org.apache.hadoop.hbase.client.TestAsyncTable.java

@Test
public void testIncrement() throws InterruptedException, ExecutionException {
    AsyncTableBase table = getTable.get();
    int count = 100;
    CountDownLatch latch = new CountDownLatch(count);
    AtomicLong sum = new AtomicLong(0L);
    IntStream.range(0, count)
            .forEach(i -> table.incrementColumnValue(row, FAMILY, QUALIFIER, 1).thenAccept(x -> {
                sum.addAndGet(x);
                latch.countDown();
            }));
    latch.await();
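    // All increment callbacks have completed; the returned values 1..count sum to count*(count+1)/2.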
    assertEquals(count, Bytes
            .toLong(table.get(new Get(row).addColumn(FAMILY, QUALIFIER)).get().getValue(FAMILY, QUALIFIER)));
    assertEquals((1 + count) * count / 2, sum.get());
}

From source file:org.apache.activemq.artemis.tests.integration.persistence.metrics.JournalPendingMessageTest.java

@Test
public void testQueueMessageSize() throws Exception {
    AtomicLong publishedMessageSize = new AtomicLong();

    publishTestQueueMessages(200, publishedMessageSize);

    verifyPendingStats(defaultQueueName, 200, publishedMessageSize.get());
    verifyPendingDurableStats(defaultQueueName, 200, publishedMessageSize.get());

    this.killServer();
    this.restartServer();

    verifyPendingStats(defaultQueueName, 200, publishedMessageSize.get());
    verifyPendingDurableStats(defaultQueueName, 200, publishedMessageSize.get());
}

From source file:org.apache.activemq.artemis.tests.integration.persistence.metrics.JournalPendingMessageTest.java

@Test
public void testQueueMessageSizeTx() throws Exception {
    AtomicLong publishedMessageSize = new AtomicLong();

    publishTestQueueMessagesTx(200, publishedMessageSize);

    verifyPendingStats(defaultQueueName, 200, publishedMessageSize.get());
    verifyPendingDurableStats(defaultQueueName, 200, publishedMessageSize.get());

    this.killServer();
    this.restartServer();

    verifyPendingStats(defaultQueueName, 200, publishedMessageSize.get());
    verifyPendingDurableStats(defaultQueueName, 200, publishedMessageSize.get());
}

From source file:com.facebook.LinkBench.LinkBenchDriver.java

/**
 * Start all runnables at the same time, then block until all
 * tasks are completed. Returns the elapsed time (in milliseconds)
 * from the start of the first task to the completion of all tasks.
 */
static long concurrentExec(final List<? extends Runnable> tasks) throws Throwable {
    final CountDownLatch startSignal = new CountDownLatch(tasks.size());
    final CountDownLatch doneSignal = new CountDownLatch(tasks.size());
    final AtomicLong startTime = new AtomicLong(0);
    for (final Runnable task : tasks) {
        new Thread(new Runnable() {
            @Override
            public void run() {
                /*
                 * Run a task.  If an uncaught exception occurs, bail
                 * out of the benchmark immediately, since any results
                 * of the benchmark will no longer be valid anyway
                 */
                try {
                    startSignal.countDown();
                    startSignal.await();
                    long now = System.currentTimeMillis();
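                    // Record the start time of the first task only; later compareAndSet calls fail harmlessly.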
                    startTime.compareAndSet(0, now);
                    task.run();
                } catch (Throwable e) {
                    Logger threadLog = Logger.getLogger(ConfigUtil.LINKBENCH_LOGGER);
                    threadLog.error("Unrecoverable exception in worker thread:", e);
                    Runtime.getRuntime().halt(1);
                }
                doneSignal.countDown();
            }
        }).start();
    }
    doneSignal.await(); // wait for all threads to finish
    long endTime = System.currentTimeMillis();
    return endTime - startTime.get();
}

From source file:org.neo4j.index.population.LucenePartitionedIndexStressTesting.java

private PopulationResult populateDb(GraphDatabaseService db) throws ExecutionException, InterruptedException {
    AtomicLong nodesCounter = new AtomicLong();

    List<Future<Long>> futures = new ArrayList<>(NUMBER_OF_POPULATORS);
    for (int i = 0; i < NUMBER_OF_POPULATORS; i++) {
        futures.add(populators.submit(new Populator(i, NUMBER_OF_POPULATORS, db, nodesCounter)));
    }

    long maxPropertyId = 0;
    for (Future<Long> future : futures) {
        maxPropertyId = Math.max(maxPropertyId, future.get());
    }
    return new PopulationResult(maxPropertyId, nodesCounter.get());
}

From source file:io.hummer.util.ws.WebServiceClient.java

private void pauseToAvoidSpamming() throws Exception {
    long minIntervalMS = 5000;
    long otherwiseSleepMS = 1500;
    long maxStoredHosts = 20;

    String host = new URL(this.endpointURL).getHost();
    synchronized (lastRequestedHosts) {
        if (!lastRequestedHosts.containsKey(host)) {
            lastRequestedHosts.put(host, new AtomicLong(System.currentTimeMillis()));
            return;
        }
    }
    AtomicLong time = lastRequestedHosts.get(host);
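    // Lock on the per-host timestamp so concurrent requests to the same host are serialized.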
    synchronized (time) {
        if ((System.currentTimeMillis() - time.get()) < minIntervalMS) {
            logger.info("Sleeping some time to avoid spamming host '" + host + "'");
            Thread.sleep(otherwiseSleepMS);
            time.set(System.currentTimeMillis());
        }
    }
    if (lastRequestedHosts.size() > maxStoredHosts) {
        new CollectionsUtil().removeKeyWithSmallestValue(lastRequestedHosts);
    }
}

From source file:com.lithium.flow.vault.AgentServer.java

public AgentServer(@Nonnull Config config) throws IOException {
    checkNotNull(config);

    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    IOUtils.copy(System.in, baos);
    byte[] bytes = baos.toByteArray();

    ServerSocket server = new ServerSocket(config.getInt("agent.port"), -1, InetAddress.getByName(null));
    long endTime = System.currentTimeMillis() + config.getTime("agent.maximumTime", "1d");
    long inactiveTime = config.getTime("agent.inactiveTime", "8h");
    // Tracks the time of the most recent client connection, checked by the inactivity watchdog below.
    AtomicLong lastTime = new AtomicLong(System.currentTimeMillis());

    new LoopThread(() -> {
        try {
            Socket socket = server.accept();
            log.info("accepted connection: {}", socket);
            try (OutputStream out = socket.getOutputStream()) {
                IOUtils.copy(new ByteArrayInputStream(bytes), out);
            }
        } catch (IOException e) {
            //
        }

        lastTime.set(System.currentTimeMillis());
    });

    new LoopThread(1000, () -> {
        long time = System.currentTimeMillis();
        if (time > endTime) {
            log.info("maximum time reached");
            System.exit(0);
        }

        if (time > lastTime.get() + inactiveTime) {
            log.info("inactive time reached");
            System.exit(0);
        }
    });

    log.info("started agent on port {}", server.getLocalPort());
    Sleep.forever();
}

From source file:nl.salp.warcraft4j.dev.casc.dbc.DbcAnalyser.java

public void analyse() {
    /*
    Set<String> knownDbcFileNames = getKnownDbcFilesByName();
    Set<String> invalidDbcFileNames = getInvalidDbcFilesByName();
    Set<String> noDataDbcFileNames = getDbcFilesWithNoDataByName();
    System.out.println(format("------------------------[  KNOWN DBC FILES (%d)  ]------------------------", knownDbcFileNames.size()));
    System.out.println(format("------------------------[ INVALID DBC FILES (%d) ]------------------------", invalidDbcFileNames.size()));
    invalidDbcFileNames.stream().forEach(System.out::println);
    System.out.println(format("------------------------[ NO DATA DBC FILES (%d) ]------------------------", noDataDbcFileNames.size()));
    noDataDbcFileNames.stream().forEach(System.out::println);
    */
    // Force CASC loading.
    final Set<Long> knownHashes = cascContext.getHashes();
    final int maxChars = 20;
    System.out.println(format("Brute forcing names with up to %d characters", maxChars));
    // Counts every candidate filename attempted; reported via count.get() in the summary log below.
    final AtomicLong count = new AtomicLong(0);
    final Map<String, Long> resolvedNames = new HashMap<>();
    new DbcFilenameGenerator(maxChars, () -> (filename) -> {
        long hash = CdnCascContext.hashFilename(filename);
        if (knownHashes.contains(hash)) {
            resolvedNames.put(filename, hash);
            LOGGER.debug("Resolved filename {} to CASC known hash {}", filename, hash);
        }
        count.incrementAndGet();
    }).execute();
    LOGGER.info(
            "Attempted hashing resolution on {} filenames against {} known hashes, resulting in {} resolved CASC hashes.",
            count.get(), knownHashes.size(), resolvedNames.size());

    /*
    getDbcFiles().stream()
        .forEach(f -> System.out.println(format("DbcFile [hash: %d, filename: %s, header: %s]",
                f.getFilenameHash(),
                f.getFilename()
                        .orElse("<unknown>"),
                f.getHeader()
                        .map(FileHeader::getHeader)
                        .map(String::new)
                        .orElse(""))));
    */
}