Example usage for java.util.concurrent.atomic AtomicLong get

Introduction

On this page you can find example usages of java.util.concurrent.atomic.AtomicLong#get().

Prototype

public final long get() 

Document

Returns the current value, with memory effects as specified by VarHandle#getVolatile.
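
Before the project examples below, here is a minimal, self-contained sketch (not taken from any of those projects) showing how get() is typically used to read a value that other threads update:

import java.util.concurrent.atomic.AtomicLong;

public class AtomicLongGetExample {
    public static void main(String[] args) throws InterruptedException {
        final AtomicLong counter = new AtomicLong();

        // Four worker threads increment the shared counter concurrently.
        Thread[] workers = new Thread[4];
        for (int i = 0; i < workers.length; i++) {
            workers[i] = new Thread(() -> {
                for (int j = 0; j < 1000; j++) {
                    counter.incrementAndGet();
                }
            });
            workers[i].start();
        }
        for (Thread worker : workers) {
            worker.join();
        }

        // get() performs a volatile read, so all increments made before join() are visible here.
        System.out.println("Final count: " + counter.get()); // prints 4000
    }
}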

Usage

From source file:org.apache.qpid.server.jmx.mbeans.QueueMBean.java

public Long clearQueue() throws IOException, JMException {
    VirtualHost vhost = _queue.getParent(VirtualHost.class);
    final AtomicLong count = new AtomicLong();

    vhost.executeTransaction(new VirtualHost.TransactionalOperation() {
        public void withinTransaction(final VirtualHost.Transaction txn) {
            _queue.visit(new QueueEntryVisitor() {

                public boolean visit(final QueueEntry entry) {
                    final ServerMessage message = entry.getMessage();
                    if (message != null) {
                        txn.dequeue(entry);
                        count.incrementAndGet();

                    }
                    return false;
                }
            });

        }
    });
    return count.get();
}

From source file:org.apache.nifi.processors.standard.ExecuteSQL.java

@Override
public void onTrigger(final ProcessContext context, final ProcessSession session) throws ProcessException {
    FlowFile fileToProcess = null;
    if (context.hasIncomingConnection()) {
        fileToProcess = session.get();

        // If we have no FlowFile, and all incoming connections are self-loops then we can continue on.
        // However, if we have no FlowFile and we have connections coming from other Processors, then
        // we know that we should run only if we have a FlowFile.
        if (fileToProcess == null && context.hasNonLoopConnection()) {
            return;
        }
    }

    final ComponentLog logger = getLogger();
    final DBCPService dbcpService = context.getProperty(DBCP_SERVICE).asControllerService(DBCPService.class);
    final Integer queryTimeout = context.getProperty(QUERY_TIMEOUT).asTimePeriod(TimeUnit.SECONDS).intValue();
    final boolean convertNamesForAvro = context.getProperty(NORMALIZE_NAMES_FOR_AVRO).asBoolean();
    final StopWatch stopWatch = new StopWatch(true);
    final String selectQuery;
    if (context.getProperty(SQL_SELECT_QUERY).isSet()) {
        selectQuery = context.getProperty(SQL_SELECT_QUERY).evaluateAttributeExpressions(fileToProcess)
                .getValue();
    } else {
        // If the query is not set, then an incoming flow file is required, and expected to contain a valid SQL select query.
        // If there is no incoming connection, onTrigger will not be called as the processor will fail when scheduled.
        final StringBuilder queryContents = new StringBuilder();
        session.read(fileToProcess, new InputStreamCallback() {
            @Override
            public void process(InputStream in) throws IOException {
                queryContents.append(IOUtils.toString(in));
            }
        });
        selectQuery = queryContents.toString();
    }

    try (final Connection con = dbcpService.getConnection(); final Statement st = con.createStatement()) {
        st.setQueryTimeout(queryTimeout); // timeout in seconds
        final AtomicLong nrOfRows = new AtomicLong(0L);
        if (fileToProcess == null) {
            fileToProcess = session.create();
        }
        fileToProcess = session.write(fileToProcess, new OutputStreamCallback() {
            @Override
            public void process(final OutputStream out) throws IOException {
                try {
                    logger.debug("Executing query {}", new Object[] { selectQuery });
                    final ResultSet resultSet = st.executeQuery(selectQuery);
                    nrOfRows.set(JdbcCommon.convertToAvroStream(resultSet, out, convertNamesForAvro));
                } catch (final SQLException e) {
                    throw new ProcessException(e);
                }
            }
        });

        // set attribute how many rows were selected
        fileToProcess = session.putAttribute(fileToProcess, RESULT_ROW_COUNT, String.valueOf(nrOfRows.get()));

        logger.info("{} contains {} Avro records; transferring to 'success'",
                new Object[] { fileToProcess, nrOfRows.get() });
        session.getProvenanceReporter().modifyContent(fileToProcess, "Retrieved " + nrOfRows.get() + " rows",
                stopWatch.getElapsed(TimeUnit.MILLISECONDS));
        session.transfer(fileToProcess, REL_SUCCESS);
    } catch (final ProcessException | SQLException e) {
        if (fileToProcess == null) {
            // This can happen if any exceptions occur while setting up the connection, statement, etc.
            logger.error("Unable to execute SQL select query {} due to {}. No FlowFile to route to failure",
                    new Object[] { selectQuery, e });
            context.yield();
        } else {
            if (context.hasIncomingConnection()) {
                logger.error("Unable to execute SQL select query {} for {} due to {}; routing to failure",
                        new Object[] { selectQuery, fileToProcess, e });
                fileToProcess = session.penalize(fileToProcess);
            } else {
                logger.error("Unable to execute SQL select query {} due to {}; routing to failure",
                        new Object[] { selectQuery, e });
                context.yield();
            }
            session.transfer(fileToProcess, REL_FAILURE);
        }
    }
}

From source file:org.apache.activemq.artemis.tests.integration.persistence.metrics.JournalPendingMessageTest.java

@Test
public void testTopicMessageSizeShared() throws Exception {
    AtomicLong publishedMessageSize = new AtomicLong();

    Connection connection = cf.createConnection();
    connection.setClientID("clientId");
    connection.start();
    Session session = connection.createSession(false, Session.AUTO_ACKNOWLEDGE);
    MessageConsumer consumer = session.createSharedConsumer(session.createTopic(defaultTopicName), "sub1");
    MessageConsumer consumer2 = session.createSharedConsumer(session.createTopic(defaultTopicName), "sub1");

    publishTestTopicMessages(200, publishedMessageSize);

    verifyPendingStats(defaultTopicName, 200, publishedMessageSize.get());
    verifyPendingDurableStats(defaultTopicName, 0, 0);
    consumer2.close();

    // consume all messages
    consumeTestMessages(consumer, 200);

    // All messages should now be gone
    verifyPendingStats(defaultTopicName, 0, 0);
    verifyPendingDurableStats(defaultTopicName, 0, 0);

    connection.close();
}

From source file:org.archive.crawler.admin.StatisticsTracker.java

protected void writeHostsReportTo(final PrintWriter writer) {
    // TODO: use CrawlHosts for all stats; only perform sorting on 
    // manageable number of hosts
    SortedMap<String, AtomicLong> hd = getReverseSortedHostsDistribution();
    // header
    writer.print(
            "[#urls] [#bytes] [host] [#robots] [#remaining] [#novel-urls] [#novel-bytes] [#dup-by-hash-urls] [#dup-by-hash-bytes] [#not-modified-urls] [#not-modified-bytes]\n");
    for (String key : hd.keySet()) {
        // Key is 'host'.
        CrawlHost host = controller.getServerCache().getHostFor(key);
        AtomicLong val = hd.get(key);
        writeReportLine(writer, val == null ? "-" : val.get(), getBytesPerHost(key), key,
                host.getSubstats().getRobotsDenials(), host.getSubstats().getRemaining(),
                host.getSubstats().getNovelUrls(), host.getSubstats().getNovelBytes(),
                host.getSubstats().getDupByHashUrls(), host.getSubstats().getDupByHashBytes(),
                host.getSubstats().getNotModifiedUrls(), host.getSubstats().getNotModifiedBytes());
    }
    // StatisticsTracker doesn't know of zero-completion hosts; 
    // so supplement report with those entries from host cache
    Closure logZeros = new Closure() {
        public void execute(Object obj) {
            CrawlHost host = (CrawlHost) obj;
            if (host.getSubstats().getRecordedFinishes() == 0) {
                writeReportLine(writer, host.getSubstats().getRecordedFinishes(),
                        host.getSubstats().getTotalBytes(), host.getHostName(),
                        host.getSubstats().getRobotsDenials(), host.getSubstats().getRemaining(),
                        host.getSubstats().getNovelUrls(), host.getSubstats().getNovelBytes(),
                        host.getSubstats().getDupByHashUrls(), host.getSubstats().getDupByHashBytes(),
                        host.getSubstats().getNotModifiedUrls(), host.getSubstats().getNotModifiedBytes());
            }
        }
    };
    controller.getServerCache().forAllHostsDo(logZeros);
}

From source file:org.dswarm.wikidataimporter.WikidataDswarmImporter.java

private boolean checkAndOptionallyUpdateBigCounter(final AtomicLong count, final AtomicLong bigCount) {

    boolean needsUpdate = true;
    boolean updated = false;

    do {

        final long currentCount = count.get();
        final long currentBigCount = bigCount.get();

        needsUpdate = currentCount / 10000 == currentBigCount;

        if (needsUpdate) {

            // retry the loop if another thread has bumped bigCount in the meantime
            needsUpdate = !bigCount.compareAndSet(currentBigCount, currentBigCount + 1);

            updated = !needsUpdate;
        }
    } while (needsUpdate);

    return updated;
}

From source file:info.archinnov.achilles.it.TestCRUDSimpleEntity.java

@Test
public void should_insert_if_not_exists() throws Exception {
    //Given
    final long id = 100L;
    final Date date = buildDateKey();
    scriptExecutor.executeScriptTemplate("SimpleEntity/insert_single_row.cql",
            ImmutableMap.of("id", id, "table", "simple"));

    final SimpleEntity entity = new SimpleEntity(id, date, "value");
    final AtomicBoolean error = new AtomicBoolean(false);
    final AtomicLong currentId = new AtomicLong(0L);

    final LWTResultListener lwtListener = new LWTResultListener() {

        @Override
        public void onSuccess() {

        }

        @Override
        public void onError(LWTResult lwtResult) {
            error.getAndSet(true);
            currentId.getAndSet(lwtResult.currentValues().getTyped("id"));
        }
    };

    //When
    manager.crud().insert(entity).ifNotExists().withLwtResultListener(lwtListener).execute();

    //Then
    assertThat(error.get()).isTrue();
    assertThat(currentId.get()).isEqualTo(id);
}

From source file:org.lendingclub.mercator.docker.SwarmScanner.java

public void scanServicesForSwarm(String swarmClusterId) {

    JsonNode response = getRestClient().getServices();

    AtomicLong earlistUpdate = new AtomicLong(Long.MAX_VALUE);
    AtomicBoolean error = new AtomicBoolean(false);
    response.forEach(it -> {
        try {
            ObjectNode n = flattenService(it);
            n.put("swarmClusterId", swarmClusterId);
            dockerScanner.getNeoRxClient().execCypher(
                    "merge (x:DockerService {serviceId:{serviceId}}) set x+={props}, x.updateTs=timestamp() return x",
                    "serviceId", n.get("serviceId").asText(), "props", n).forEach(svc -> {
                        removeDockerLabels("DockerService", "serviceId", n.get("serviceId").asText(), n, svc);
                        earlistUpdate.set(
                                Math.min(earlistUpdate.get(), svc.path("updateTs").asLong(Long.MAX_VALUE)));
                    });
            dockerScanner.getNeoRxClient().execCypher(
                    "match (swarm:DockerSwarm {swarmClusterId:{swarmClusterId}}),(service:DockerService{serviceId:{serviceId}}) merge (swarm)-[x:CONTAINS]->(service) set x.updateTs=timestamp()",
                    "swarmClusterId", swarmClusterId, "serviceId", n.path("serviceId").asText());

        } catch (Exception e) {
            logger.warn("problem updating service", e);
            error.set(true);
        }
    });
    if (error.get() == false) {
        if (earlistUpdate.get() < System.currentTimeMillis()) {
            dockerScanner.getNeoRxClient().execCypher(
                    "match (x:DockerService) where x.swarmClusterId={swarmClusterId} and x.updateTs<{cutoff} detach delete x",
                    "cutoff", earlistUpdate.get(), "swarmClusterId", swarmClusterId);
        }
    }

}

From source file:org.axonframework.migration.eventstore.JpaEventStoreMigrator.java

public boolean run() throws Exception {
    final AtomicInteger updateCount = new AtomicInteger();
    final AtomicInteger skipCount = new AtomicInteger();
    final AtomicLong lastId = new AtomicLong(
            Long.parseLong(configuration.getProperty("lastProcessedId", "-1")));
    try {
        TransactionTemplate template = new TransactionTemplate(txManager);
        template.setReadOnly(true);
        System.out.println("Starting conversion. Fetching batches of " + QUERY_BATCH_SIZE + " items.");
        while (template.execute(new TransactionCallback<Boolean>() {
            @Override
            public Boolean doInTransaction(TransactionStatus status) {
                final Session hibernate = entityManager.unwrap(Session.class);
                Iterator<Object[]> results = hibernate.createQuery(
                        "SELECT e.aggregateIdentifier, e.sequenceNumber, e.type, e.id FROM DomainEventEntry e "
                                + "WHERE e.id > :lastIdentifier ORDER BY e.id ASC")
                        .setFetchSize(1000).setMaxResults(QUERY_BATCH_SIZE).setReadOnly(true)
                        .setParameter("lastIdentifier", lastId.get()).iterate();
                if (!results.hasNext()) {
                    System.out.println("Empty batch. Assuming we're done.");
                    return false;
                } else if (Thread.interrupted()) {
                    System.out.println("Received an interrupt. Stopping...");
                    return false;
                }
                while (results.hasNext()) {
                    List<ConversionItem> conversionBatch = new ArrayList<ConversionItem>();
                    while (conversionBatch.size() < CONVERSION_BATCH_SIZE && results.hasNext()) {
                        Object[] item = results.next();
                        String aggregateIdentifier = (String) item[0];
                        long sequenceNumber = (Long) item[1];
                        String type = (String) item[2];
                        Long entryId = (Long) item[3];
                        lastId.set(entryId);
                        conversionBatch
                                .add(new ConversionItem(sequenceNumber, aggregateIdentifier, type, entryId));
                    }
                    if (!conversionBatch.isEmpty()) {
                        executor.submit(new TransformationTask(conversionBatch, skipCount));
                    }
                }
                return true;
            }
        })) {
            System.out.println("Reading next batch, starting at ID " + lastId.get() + ".");
            System.out.println(
                    "Estimated backlog size is currently: " + (workQueue.size() * CONVERSION_BATCH_SIZE));
        }
    } finally {
        executor.shutdown();
        executor.awaitTermination(5, TimeUnit.MINUTES);
        if (lastId.get() >= 0) {
            System.out.println(
                    "Processed events from old event store up to (and including) id = " + lastId.get());
        }
    }
    System.out.println("In total " + updateCount.get() + " items have been converted.");
    return skipCount.get() == 0;
}

From source file:discord4j.core.ExampleBot.java

@Test
@Ignore("Example code excluded from CI")
public void testCommandBot() {
    DiscordClient client = new DiscordClientBuilder(token).build();

    // Get the bot owner ID to filter commands
    AtomicLong ownerId = new AtomicLong();
    Flux.first(client.getEventDispatcher().on(ReadyEvent.class),
            client.getEventDispatcher().on(ResumeEvent.class)).next()
            .flatMap(evt -> client.getApplicationInfo()).map(ApplicationInfo::getOwnerId).map(Snowflake::asLong)
            .subscribe(ownerId::set);

    // Create our event handlers
    List<EventHandler> eventHandlers = new ArrayList<>();
    eventHandlers.add(new AddRole());
    eventHandlers.add(new Echo());
    eventHandlers.add(new UserInfo());
    eventHandlers.add(new LogLevelChange());
    eventHandlers.add(new BlockingEcho());
    eventHandlers.add(new Reactor());
    eventHandlers.add(new ChangeAvatar());

    // Build a safe event-processing pipeline
    client.getEventDispatcher().on(MessageCreateEvent.class)
            .filter(event -> event.getMessage().getAuthor().map(User::getId).map(Snowflake::asLong)
                    .filter(id -> ownerId.get() == id).isPresent())
            .flatMap(event -> Mono.whenDelayError(eventHandlers.stream()
                    .map(handler -> handler.onMessageCreate(event)).collect(Collectors.toList())))
            .onErrorContinue((t, o) -> log.error("Error while processing event", t)).subscribe();

    client.login().block();
}

From source file:sx.blah.discord.handle.impl.obj.Channel.java

@Override
public MessageHistory getMessageHistory(int messageCount) {
    if (messageCount <= messages.size()) { // we already have all of the wanted messages in the cache
        return new MessageHistory(messages.values().stream().sorted(new MessageComparator(true))
                .limit(messageCount).collect(Collectors.toList()));
    } else {
        List<IMessage> retrieved = new ArrayList<>(messageCount);
        AtomicLong lastMessage = new AtomicLong(DiscordUtils.getSnowflakeFromTimestamp(Instant.now()));
        int chunkSize = messageCount < MESSAGE_CHUNK_COUNT ? messageCount : MESSAGE_CHUNK_COUNT;

        while (retrieved.size() < messageCount) { // while we don't have messageCount messages
            IMessage[] chunk = getHistory(lastMessage.get(), chunkSize);

            if (chunk.length == 0)
                break;

            lastMessage.set(chunk[chunk.length - 1].getLongID());
            Collections.addAll(retrieved, chunk);
        }

        return new MessageHistory(
                retrieved.size() > messageCount ? retrieved.subList(0, messageCount) : retrieved);
    }
}