Example usage for java.util.concurrent.atomic AtomicLong incrementAndGet

Introduction

This page collects example usages of java.util.concurrent.atomic.AtomicLong#incrementAndGet, drawn from open-source projects.

Prototype

public final long incrementAndGet() 

Document

Atomically increments the current value, with memory effects as specified by VarHandle#getAndAdd.
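
For orientation, here is a minimal, self-contained sketch (not drawn from any of the projects below; the class name and thread count are illustrative) that uses incrementAndGet() as a shared counter that never loses updates under concurrency:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;

public class IncrementAndGetDemo {
    public static void main(String[] args) throws InterruptedException {
        AtomicLong counter = new AtomicLong(0);
        ExecutorService pool = Executors.newFixedThreadPool(4);
        for (int i = 0; i < 1000; i++) {
            // atomically adds 1 and returns the updated value; concurrent
            // submissions can never lose an increment
            pool.submit(counter::incrementAndGet);
        }
        pool.shutdown();
        pool.awaitTermination(1, TimeUnit.MINUTES);
        System.out.println(counter.get()); // always prints 1000
    }
}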

Usage

From source file:ai.susi.mind.SusiMind.java

/**
 * This is the core principle of creativity: being able to match a given input
 * with problem-solving knowledge.
 * This method finds ideas (skills instantiated with the query) for a given query.
 * The skills are selected using a scoring system and pattern matching against the query.
 * Not only the most recent user query is considered for skill selection, but also
 * previous queries and their answers, so that new skills can be selected in the
 * context of the ongoing conversation.
 * @param query the user input
 * @param latest_thought the latest conversation with the same user
 * @param maxcount the maximum number of ideas to return
 * @return an ordered list of ideas, first idea should be considered first.
 */
public List<SusiIdea> creativity(String query, SusiThought latest_thought, int maxcount) {
    // tokenize query to have hint for idea collection
    final List<SusiIdea> ideas = new ArrayList<>();
    this.reader.tokenizeSentence(query).forEach(token -> {
        Set<SusiSkill> skill_for_category = this.skilltrigger.get(token.categorized);
        Set<SusiSkill> skill_for_original = token.original.equals(token.categorized) ? null
                : this.skilltrigger.get(token.original);
        Set<SusiSkill> r = new HashSet<>();
        if (skill_for_category != null)
            r.addAll(skill_for_category);
        if (skill_for_original != null)
            r.addAll(skill_for_original);
        r.forEach(skill -> ideas.add(new SusiIdea(skill).setIntent(token)));
    });

    for (SusiIdea idea : ideas)
        DAO.log("idea.phrase-1: score=" + idea.getSkill().getScore().score + " : "
                + idea.getSkill().getPhrases().toString() + " " + idea.getSkill().getActionsClone());

    // add catchall skills always (those are the 'bad ideas')
    Collection<SusiSkill> ca = this.skilltrigger.get(SusiSkill.CATCHALL_KEY);
    if (ca != null)
        ca.forEach(skill -> ideas.add(new SusiIdea(skill)));

    // create list of all ideas that might apply
    TreeMap<Long, List<SusiIdea>> scored = new TreeMap<>();
    AtomicLong count = new AtomicLong(0);
    ideas.forEach(idea -> {
        int score = idea.getSkill().getScore().score;
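        // higher scores yield smaller keys, so they sort first in the ascending
        // TreeMap; incrementAndGet() supplies a unique tiebreaker that preserves
        // insertion order among ideas with equal scores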
        long orderkey = Long.MAX_VALUE - ((long) score) * 1000L + count.incrementAndGet();
        List<SusiIdea> r = scored.get(orderkey);
        if (r == null) {
            r = new ArrayList<>();
            scored.put(orderkey, r);
        }
        r.add(idea);
    });

    // make a sorted list of all ideas
    ideas.clear();
    scored.values().forEach(r -> ideas.addAll(r));

    for (SusiIdea idea : ideas)
        DAO.log("idea.phrase-2: score=" + idea.getSkill().getScore().score + " : "
                + idea.getSkill().getPhrases().toString() + " " + idea.getSkill().getActionsClone());

    // test ideas and collect those which match up to maxcount
    List<SusiIdea> plausibleIdeas = new ArrayList<>(Math.min(10, maxcount));
    for (SusiIdea idea : ideas) {
        SusiSkill skill = idea.getSkill();
        Collection<Matcher> m = skill.matcher(query);
        if (m.isEmpty())
            continue;
        // TODO: evaluate leading SEE flow commands right here as well
        plausibleIdeas.add(idea);
        if (plausibleIdeas.size() >= maxcount)
            break;
    }

    for (SusiIdea idea : plausibleIdeas) {
        DAO.log("idea.phrase-3: score=" + idea.getSkill().getScore().score + " : "
                + idea.getSkill().getPhrases().toString() + " " + idea.getSkill().getActionsClone());
        DAO.log("idea.phrase-3:   log=" + idea.getSkill().getScore().log);
    }

    return plausibleIdeas;
}

From source file:org.sonar.server.benchmark.SourceDbBenchmarkTest.java

private void scrollRows() throws SQLException {
    LOGGER.info("Scroll table FILE_SOURCES");
    DbClient dbClient = new DbClient(dbTester.database(), dbTester.myBatis());
    Connection connection = dbTester.openConnection();
    AtomicLong counter = new AtomicLong();
    ProgressTask progress = new ProgressTask(LOGGER, "source file", counter);
    Timer timer = new Timer("SourceDbScroll");
    timer.schedule(progress, ProgressTask.PERIOD_MS, ProgressTask.PERIOD_MS);

    try {
        long start = System.currentTimeMillis();
        SourceLineResultSetIterator it = SourceLineResultSetIterator.create(dbClient, connection, 0L);
        while (it.hasNext()) {
            SourceLineResultSetIterator.SourceFile row = it.next();
            assertThat(row.getLines().size()).isEqualTo(3220);
            assertThat(row.getFileUuid()).isNotEmpty();
            counter.incrementAndGet();
        }
        long end = System.currentTimeMillis();
        long period = end - start;
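        // one increment per scrolled row; note the division below assumes the
        // scroll took at least 1 ms, otherwise period is zero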
        long throughputPerSecond = 1000L * counter.get() / period;
        LOGGER.info(String.format("%d FILE_SOURCES rows scrolled in %d ms (%d rows/second)", counter.get(),
                period, throughputPerSecond));

    } finally {
        DbUtils.closeQuietly(connection);
        timer.cancel();
    }
}

From source file:com.facebook.presto.accumulo.tools.RewriteIndex.java

private void setRowIdStatuses(Connector connector, AccumuloTable table, long timestamp,
        Multimap<ByteBuffer, Mutation> queryIndexEntries, Map<ByteBuffer, RowStatus> rowIdStatuses)
        throws TableNotFoundException {
    // Set ranges to all row IDs that we have no status for
    List<Range> queryRanges = queryIndexEntries.keySet().stream().filter(x -> !rowIdStatuses.containsKey(x))
            .map(x -> new Range(new Text(x.array()))).collect(Collectors.toList());

    if (queryRanges.size() == 0) {
        return;
    }

    BatchScanner scanner = connector.createBatchScanner(table.getFullTableName(), auths, 10);
    scanner.setRanges(queryRanges);

    IteratorSetting iteratorSetting = new IteratorSetting(Integer.MAX_VALUE, TimestampFilter.class);
    TimestampFilter.setEnd(iteratorSetting, timestamp, true);
    scanner.addScanIterator(iteratorSetting);

    scanner.addScanIterator(new IteratorSetting(1, FirstEntryInRowIterator.class));

    // Make a copy of all the row IDs we are querying on to back-fill collection
    Set<ByteBuffer> allRowIDs = new HashSet<>(queryIndexEntries.keySet());

    // Scan the data table, removing all known row IDs and setting their status to present
    Text text = new Text();
    for (Entry<Key, Value> entry : scanner) {
        ByteBuffer rowID = ByteBuffer.wrap(entry.getKey().getRow(text).copyBytes());
        allRowIDs.remove(rowID);

        // Assert that this entry is new
        if (rowIdStatuses.put(rowID, RowStatus.PRESENT) != null) {
            throw new RuntimeException(
                    format("Internal error, row %s already has status", new String(rowID.array(), UTF_8)));
        }
    }
    scanner.close();

    AtomicLong newlyAbsent = new AtomicLong(0);
    // Back-fill the absent map -- rows may already be flagged as absent
    allRowIDs.forEach(rowID -> {
        RowStatus existingStatus = rowIdStatuses.get(rowID);
        if (existingStatus == null) {
            newlyAbsent.incrementAndGet();
            rowIdStatuses.put(rowID, RowStatus.ABSENT);
        } else if (existingStatus == RowStatus.PRESENT) {
            throw new RuntimeException(format("Internal error, row %s already has PRESENT status",
                    new String(rowID.array(), UTF_8)));
        }
    });
}

From source file:com.facebook.presto.accumulo.tools.RewriteIndex.java

private void flushDeleteEntries(Connector connector, AccumuloTable table, long start, BatchWriter indexWriter,
        Multimap<ByteBuffer, Mutation> queryIndexEntries, Map<ByteBuffer, RowStatus> rowIdStatuses)
        throws MutationsRejectedException, TableNotFoundException {
    if (queryIndexEntries.size() > 0) {
        setRowIdStatuses(connector, table, start, queryIndexEntries, rowIdStatuses);

        AtomicLong numDeleteRows = new AtomicLong(0);
        ImmutableList.Builder<Mutation> builder = ImmutableList.builder();
        queryIndexEntries.asMap().entrySet().forEach(entry -> {
            if (rowIdStatuses.get(entry.getKey()) == RowStatus.ABSENT) {
                builder.addAll(entry.getValue());
                numDeleteRows.incrementAndGet();
            }
        });
        List<Mutation> deleteMutations = builder.build();

        numDeletedIndexEntries += deleteMutations.size();

        if (!dryRun) {
            indexWriter.addMutations(deleteMutations);
        }
    }
}

From source file:com.github.jackygurui.vertxredissonrepository.repository.SaveCustomerCocurrentTest.java

@Test
public void test3SaveCustomerConcurrent(TestContext context) throws Exception {
    Async async = context.async();
    JsonNode source = JsonLoader.fromResource("/Customer.json");
    int records = 10000;
    HanyuPinyin.convert("");//warm up
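    // completion counter shared across the parallel create() callbacks; the
    // incrementAndGet() below fires async.complete() after the last record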
    AtomicLong c = new AtomicLong(0);
    StopWatch sw = new StopWatch();
    sw.start();
    IntStream.rangeClosed(1, records).parallel().forEach(e -> {
        JsonObject clone = new JsonObject(Json.encode(source));
        clone.getJsonObject("personalDetails").put("phoneNumber",
                ((Long.parseLong(clone.getJsonObject("personalDetails").getString("phoneNumber")) + 100 + e)
                        + ""));
        customerRepository.create(Json.encode(clone), r -> {
            AsyncResult<String> result = (AsyncResult<String>) r;
            if (result.failed()) {
                context.fail(result.cause());
                async.complete();
            }
            if (c.incrementAndGet() == records) {
                sw.stop();
                logger.info("time to concurrently save " + records + " customer records: " + sw.getTime());
                async.complete();
            }
        });
    });
}

From source file:de.tudarmstadt.lt.seg.app.Segmenter.java

private void run_parallel() throws Exception {

    InputStream in = System.in;
    if (!"-".equals(_filename_in))
        in = new FileInputStream(_filename_in);
    Stream<String> liter = new BufferedReader(new InputStreamReader(in, Charset.defaultCharset())).lines();

    ThreadLocal<ISentenceSplitter> sentenceSplitter = ThreadLocal.withInitial(() -> {
        try {
            return newSentenceSplitter();
        } catch (ClassNotFoundException | InstantiationException | IllegalAccessException e) {
            throw new RuntimeException(e);
        }
    });
    ThreadLocal<ITokenizer> tokenizer = ThreadLocal.withInitial(() -> {
        try {
            return newTokenizer();
        } catch (ClassNotFoundException | InstantiationException | IllegalAccessException e) {
            throw new RuntimeException(e);
        }
    });

    final PrintWriter[] w = new PrintWriter[_parallelism];
    // init writers
    for (int i = 0; i < _parallelism; i++) {
        OutputStream out = System.out;
        if (!"-".equals(_filename_out)) {
            out = new FileOutputStream(String.format("%s_%d", _filename_out, i));
        }
        w[i] = new PrintWriter(new OutputStreamWriter(out, Charset.defaultCharset()));
    }

    BlockingQueue<Runnable> queue = new ArrayBlockingQueue<Runnable>(_parallelism * 2, true);
    ExecutorService es = new ThreadPoolExecutor(_parallelism, _parallelism, 0L, TimeUnit.MILLISECONDS, queue);

    AtomicLong lc = new AtomicLong(0);
    liter.forEach((line) -> {
        // don't try to submit new threads, wait until the thread queue has some capacity again
        while (queue.remainingCapacity() == 0)
            try {
                Thread.sleep(10);
            } catch (InterruptedException e) {
                // ignore and keep waiting for queue capacity
            }
        es.submit(() -> {
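            // lc hands out a unique, thread-safe document id per submitted line,
            // used for progress logging and to pick the output writer shard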
            final long docid = lc.incrementAndGet();
            if (docid % 1000 == 0)
                System.err.format("Processing line %d ('%s')%n", docid, _filename_in);
            final int w_i = (int) (docid % _parallelism);
            split_and_tokenize(new StringReader(line.trim()), String.format("%s:%d", _filename_in, docid),
                    sentenceSplitter.get(), tokenizer.get(), _level_filter, _level_normalize, _merge_types,
                    _merge_tokens, _separator_sentence, _separator_token, _separator_desc, w[w_i]);

        });
    });
    es.shutdown();
    es.awaitTermination(Integer.MAX_VALUE, TimeUnit.DAYS);

    // TODO: the stream parallelism version does not work because it submits too many threads at once
    //      AtomicLong lc = new AtomicLong(0);
    //      ForkJoinPool forkJoinPool = new ForkJoinPool(_parallelism);
    //      forkJoinPool.submit(() -> 
    //         liter.parallel().forEach((line) -> {
    //            final long docid = lc.incrementAndGet();
    //            if(docid % 1000 == 0)
    //               System.err.format("Processing line %d ('%s')%n", docid, _filename_in);
    //   
    //            String l = line.replace("\\t", "\t").replace("\\n", "\n");
    //            split_and_tokenize(
    //                  new StringReader(l),
    //                  String.format("%s:%d", _filename_in, docid),
    //                  sentenceSplitter.get(), 
    //                  tokenizer.get(), 
    //                  _level_filter,
    //                  _level_normalize,
    //                  _merge_types,
    //                  _merge_tokens,
    //                  _separator_sentence,
    //                  _separator_token,
    //                  _separator_desc,
    //                  w);
    //      })).get();

}

From source file:org.springframework.amqp.rabbit.core.RabbitTemplatePublisherCallbacksIntegrationTests.java

@Test
public void testPublisherConfirmGetUnconfirmedConcurrency() throws Exception {
    ConnectionFactory mockConnectionFactory = mock(ConnectionFactory.class);
    Connection mockConnection = mock(Connection.class);
    Channel mockChannel = mock(Channel.class);
    when(mockChannel.isOpen()).thenReturn(true);
    final AtomicLong seq = new AtomicLong();
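    // stub the broker-assigned publish sequence with a monotonically
    // increasing counter so every send receives a distinct number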
    doAnswer(invocation -> seq.incrementAndGet()).when(mockChannel).getNextPublishSeqNo();

    when(mockConnectionFactory.newConnection(any(ExecutorService.class), anyString()))
            .thenReturn(mockConnection);
    when(mockConnection.isOpen()).thenReturn(true);
    doReturn(mockChannel).when(mockConnection).createChannel();

    CachingConnectionFactory ccf = new CachingConnectionFactory(mockConnectionFactory);
    ccf.setPublisherConfirms(true);
    final RabbitTemplate template = new RabbitTemplate(ccf);

    final AtomicBoolean confirmed = new AtomicBoolean();
    template.setConfirmCallback((correlationData, ack, cause) -> confirmed.set(true));
    ExecutorService exec = Executors.newSingleThreadExecutor();
    final AtomicBoolean sentAll = new AtomicBoolean();
    exec.execute(() -> {
        for (int i = 0; i < 10000; i++) {
            template.convertAndSend(ROUTE, (Object) "message", new CorrelationData("abc"));
        }
        sentAll.set(true);
    });
    long t1 = System.currentTimeMillis();
    while (!sentAll.get() && System.currentTimeMillis() < t1 + 20000) {
        template.getUnconfirmed(-1);
    }
    assertTrue(sentAll.get());
    assertFalse(confirmed.get());
}

From source file:org.springframework.amqp.rabbit.core.RabbitTemplatePublisherCallbacksIntegrationTests.java

private void testPublisherConfirmCloseConcurrency(final int closeAfter) throws Exception {
    ConnectionFactory mockConnectionFactory = mock(ConnectionFactory.class);
    Connection mockConnection = mock(Connection.class);
    Channel mockChannel1 = mock(Channel.class);
    final AtomicLong seq1 = new AtomicLong();
    doAnswer(invocation -> seq1.incrementAndGet()).when(mockChannel1).getNextPublishSeqNo();

    Channel mockChannel2 = mock(Channel.class);
    when(mockChannel2.isOpen()).thenReturn(true);
    final AtomicLong seq2 = new AtomicLong();
    doAnswer(invocation -> seq2.incrementAndGet()).when(mockChannel2).getNextPublishSeqNo();

    when(mockConnectionFactory.newConnection(any(ExecutorService.class), anyString()))
            .thenReturn(mockConnection);
    when(mockConnection.isOpen()).thenReturn(true);
    when(mockConnection.createChannel()).thenReturn(mockChannel1, mockChannel2);

    CachingConnectionFactory ccf = new CachingConnectionFactory(mockConnectionFactory);
    ccf.setPublisherConfirms(true);
    final RabbitTemplate template = new RabbitTemplate(ccf);

    final CountDownLatch confirmed = new CountDownLatch(1);
    template.setConfirmCallback((correlationData, ack, cause) -> confirmed.countDown());
    ExecutorService exec = Executors.newSingleThreadExecutor();
    final AtomicInteger sent = new AtomicInteger();
    doAnswer(invocation -> sent.incrementAndGet() < closeAfter).when(mockChannel1).isOpen();
    final CountDownLatch sentAll = new CountDownLatch(1);
    exec.execute(() -> {
        for (int i = 0; i < 1000; i++) {
            try {
                template.convertAndSend(ROUTE, (Object) "message", new CorrelationData("abc"));
            } catch (AmqpException e) {
                // expected once mockChannel1 reports closed; keep sending
            }
        }
        sentAll.countDown();
    });
    assertTrue(sentAll.await(10, TimeUnit.SECONDS));
    assertTrue(confirmed.await(10, TimeUnit.SECONDS));
}

From source file:com.github.jackygurui.vertxredissonrepository.repository.SaveAndGetCustomerConcurrentWaterfallTest.java

@Test
public void test4SaveAndGetCustomerConcurrentWaterfall(TestContext context) throws Exception {
    Async async = context.async();
    JsonNode source = JsonLoader.fromResource("/Customer.json");
    int records = 10000;
    HanyuPinyin.convert("");//warm up
    AtomicLong counter = new AtomicLong(0);
    StopWatch sw = new StopWatch();
    sw.start();
    IntStream.rangeClosed(1, records).parallel().forEach(e -> {
        JsonObject clone = new JsonObject(Json.encode(source));
        clone.getJsonObject("personalDetails").put("phoneNumber",
                ((Long.parseLong(clone.getJsonObject("personalDetails").getString("phoneNumber")) + 5000 + e)
                        + ""));
        org.simondean.vertx.async.Async.waterfall().<String>task(t -> {
            customerRepository.create(Json.encode(clone), t);
        }).<Customer>task((id, t) -> {
            customerRepository.get(id, t);
        }).run(rr -> {
            long ct = counter.incrementAndGet();
            //                logger.info("Counter = " + ct + " | success = " + !r.failed());
            if (rr.succeeded()) {
                try {
                    Customer loaded = rr.result();
                    Customer c = Json.decodeValue(clone.encode(), Customer.class);
                    c.setId(loaded.getId());
                    c.getAddressDetails().setId(loaded.getId());
                    c.getPersonalDetails().setId(loaded.getId());
                    String encoded = Json.encode(c);
                    // log a diff when the reloaded JSON differs from the source
                    if (!Json.encode(loaded).equals(encoded)) {
                        logger.info(loaded.getId() + " - SOURCE : " + encoded);
                        logger.info(loaded.getId() + " - RESULT : " + rr.result());
                    }
                    context.assertEquals(Json.encode(rr.result()), encoded);
                } catch (Exception ex) {
                    context.fail(ex);
                    async.complete();
                }
            } else {
                context.fail(rr.cause());
                async.complete();
            }
            if (ct == records) {
                sw.stop();
                logger.info("time to concurrently save and get using waterfall " + records
                        + " customer records: " + sw.getTime());
                async.complete();
            }
        });
    });
}

From source file:com.dataartisans.queryablestatedemo.BumpEventGeneratorSource.java

@Override
public void run(SourceContext<BumpEvent> sourceContext) throws Exception {
    final Random rand = new Random();
    final AtomicLong count = new AtomicLong();
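    // read concurrently by the ThroughputLogger thread below; AtomicLong makes
    // the generator loop's increments safely visible across threads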

    Thread throughputLogger = null;
    if (printThroughput) {
        throughputLogger = new Thread(new ThroughputLogger(count), "ThroughputLogger");
        throughputLogger.start();
    }

    try {
        while (running) {
            // Generate random events
            final int userId = rand.nextInt(Integer.MAX_VALUE);

            final String itemCase = RandomStringUtils.randomAlphanumeric(ITEM_ID_NUM_CHARS).toLowerCase();

            synchronized (sourceContext.getCheckpointLock()) {
                sourceContext.collect(new BumpEvent(userId, itemCase));
            }

            // Increment count for throughput logger
            count.incrementAndGet();

            Thread.yield();
        }
    } finally {
        if (throughputLogger != null) {
            throughputLogger.interrupt();
            throughputLogger.join();
        }
    }
}