Example usage for java.util.concurrent.atomic AtomicLong incrementAndGet

List of usage examples for java.util.concurrent.atomic AtomicLong incrementAndGet

Introduction

This page collects example usages of java.util.concurrent.atomic.AtomicLong#incrementAndGet from the source files listed below.

Prototype

public final long incrementAndGet() 

Document

Atomically increments the current value, with memory effects as specified by VarHandle#getAndAdd.
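
A minimal, self-contained sketch of the typical pattern (the class and variable names are illustrative, not taken from the sources below): incrementAndGet() performs an atomic read-modify-write, so a single counter can be shared across threads without explicit locking.

import java.util.concurrent.atomic.AtomicLong;

public class CounterExample {
    private static final AtomicLong counter = new AtomicLong(0);

    public static void main(String[] args) throws InterruptedException {
        Runnable task = () -> {
            for (int i = 0; i < 1_000; i++) {
                counter.incrementAndGet(); // atomically adds one and returns the new value
            }
        };
        Thread t1 = new Thread(task);
        Thread t2 = new Thread(task);
        t1.start();
        t2.start();
        t1.join();
        t2.join();
        // Both threads incremented 1,000 times; the atomic counter never loses updates.
        System.out.println("Final count: " + counter.get()); // prints 2000
    }
}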

Usage

From source file:nl.salp.warcraft4j.dev.casc.dbc.DbcAnalyser.java

public void analyse() {
    /*
    Set<String> knownDbcFileNames = getKnownDbcFilesByName();
    Set<String> invalidDbcFileNames = getInvalidDbcFilesByName();
    Set<String> noDataDbcFileNames = getDbcFilesWithNoDataByName();
    System.out.println(format("------------------------[  KNOWN DBC FILES (%d)  ]------------------------", knownDbcFileNames.size()));
    System.out.println(format("------------------------[ INVALID DBC FILES (%d) ]------------------------", invalidDbcFileNames.size()));
    invalidDbcFileNames.stream().forEach(System.out::println);
    System.out.println(format("------------------------[ NO DATA DBC FILES (%d) ]------------------------", noDataDbcFileNames.size()));
    noDataDbcFileNames.stream().forEach(System.out::println);
    */
    // Force CASC loading.
    final Set<Long> knownHashes = cascContext.getHashes();
    final int maxChars = 20;
    System.out.println(format("Brute forcing names with up to %d characters", maxChars));
    final AtomicLong count = new AtomicLong(0);
    final Map<String, Long> resolvedNames = new HashMap<>();
    new DbcFilenameGenerator(maxChars, () -> (filename) -> {
        long hash = CdnCascContext.hashFilename(filename);
        if (knownHashes.contains(hash)) {
            resolvedNames.put(filename, hash);
            LOGGER.debug("Resolved filename {} to CASC known hash {}", filename, hash);
        }
        count.incrementAndGet();
    }).execute();
    LOGGER.info(
            "Attempted hashing resolution on {} filenames against {} known hashes, resulting in {} resolved CASC hashes.",
            count.get(), knownHashes.size(), resolvedNames.size());

    /*
    getDbcFiles().stream()
        .forEach(f -> System.out.println(format("DbcFile [hash: %d, filename: %s, header: %s]",
                f.getFilenameHash(),
                f.getFilename()
                        .orElse("<unknown>"),
                f.getHeader()
                        .map(FileHeader::getHeader)
                        .map(String::new)
                        .orElse(""))));
    */
}

From source file:ch.algotrader.ordermgmt.DefaultOrderBook.java

@Override
public String getNextOrderIdRevision(final String intId) {
    if (intId == null) {
        return null;
    }
    int i = intId.indexOf('.');
    String baseId = i != -1 ? intId.substring(0, i) : null;
    if (baseId == null) {
        throw new OrderRegistryException("Unexpected internal order ID format: " + intId);
    }
    String s = intId.substring(baseId.length() + 1);
    long revision;
    try {
        revision = Long.parseLong(s);
    } catch (NumberFormatException ex) {
        throw new OrderRegistryException("Unexpected internal order ID format: " + intId);
    }
    AtomicLong count = this.revisionMap.compute(baseId, (key, existing) -> {
        if (existing != null) {
            if (existing.get() < revision) {
                existing.set(revision);
            }
            return existing;
        }
        return new AtomicLong(revision);
    });
    long nextRevision = count.incrementAndGet();
    return baseId + '.' + nextRevision;
}

From source file:org.loklak.susi.SusiMind.java

/**
 * This is the core principle of creativity: being able to match a given input
 * with problem-solving knowledge.
 * This method finds ideas (rules instantiated with the query) for a given query.
 * The rules are selected using a scoring system and pattern matching with the query.
 * Rule selection considers not only the most recent user query but also previously
 * requested queries and their answers, so that new rules can be selected in the
 * context of the earlier conversation.
 * @param query the user input
 * @param latest_thought the latest conversation (thought) with the same user
 * @param maxcount the maximum number of ideas to return
 * @return an ordered list of ideas; ideas earlier in the list should be considered first.
 */
public List<SusiIdea> creativity(String query, SusiThought latest_thought, int maxcount) {
    // tokenize query to have hint for idea collection
    final List<SusiIdea> ideas = new ArrayList<>();
    this.reader.tokenizeSentence(query).forEach(token -> {
        Set<SusiRule> rule_for_category = this.ruletrigger.get(token.categorized);
        Set<SusiRule> rule_for_original = token.original.equals(token.categorized) ? null
                : this.ruletrigger.get(token.original);
        Set<SusiRule> r = new HashSet<>();
        if (rule_for_category != null)
            r.addAll(rule_for_category);
        if (rule_for_original != null)
            r.addAll(rule_for_original);
        r.forEach(rule -> ideas.add(new SusiIdea(rule).setIntent(token)));
    });

    //for (SusiIdea idea: ideas) System.out.println("idea.phrase-1:" + idea.getRule().getPhrases().toString());

    // add catchall rules always (those are the 'bad ideas')
    Collection<SusiRule> ca = this.ruletrigger.get(SusiRule.CATCHALL_KEY);
    if (ca != null)
        ca.forEach(rule -> ideas.add(new SusiIdea(rule)));

    // create list of all ideas that might apply
    TreeMap<Long, List<SusiIdea>> scored = new TreeMap<>();
    AtomicLong count = new AtomicLong(0);
    ideas.forEach(idea -> {
        int score = idea.getRule().getScore();
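        // score dominates the ordering: higher scores map to smaller keys (sorted first); the counter breaks ties in insertion order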
        long orderkey = Long.MAX_VALUE - ((long) score) * 1000L + count.incrementAndGet();
        List<SusiIdea> r = scored.get(orderkey);
        if (r == null) {
            r = new ArrayList<>();
            scored.put(orderkey, r);
        }
        r.add(idea);
    });

    // make a sorted list of all ideas
    ideas.clear();
    scored.values().forEach(r -> ideas.addAll(r));

    //for (SusiIdea idea: ideas) System.out.println("idea.phrase-2: score=" + idea.getRule().getScore() + " : " + idea.getRule().getPhrases().toString());

    // test ideas and collect those which match up to maxcount
    List<SusiIdea> plausibleIdeas = new ArrayList<>(Math.min(10, maxcount));
    for (SusiIdea idea : ideas) {
        SusiRule rule = idea.getRule();
        Collection<Matcher> m = rule.matcher(query);
        if (m.isEmpty())
            continue;
        // TODO: evaluate leading SEE flow commands right here as well
        plausibleIdeas.add(idea);
        if (plausibleIdeas.size() >= maxcount)
            break;
    }

    for (SusiIdea idea : plausibleIdeas)
        System.out.println("idea.phrase-3: score=" + idea.getRule().getScore() + " : "
                + idea.getRule().getPhrases().toString());

    return plausibleIdeas;
}

From source file:com.taobao.tddl.common.util.TDDLMBeanServer.java

private String getId(String name, String idPrefix) {
    ConcurrentHashMap<String, AtomicLong> subMap = idMap.get(name);
    if (null == subMap) {
        lock.lock();
        try {
            subMap = idMap.get(name);
            if (null == subMap) {
                subMap = new ConcurrentHashMap<String, AtomicLong>();
                idMap.put(name, subMap);
            }
        } finally {
            lock.unlock();
        }
    }

    AtomicLong indexValue = subMap.get(idPrefix);
    if (null == indexValue) {
        lock.lock();
        try {
            indexValue = subMap.get(idPrefix);
            if (null == indexValue) {
                indexValue = new AtomicLong(0);
                subMap.put(idPrefix, indexValue);
            }
        } finally {
            lock.unlock();
        }
    }
    long value = indexValue.incrementAndGet();
    String result = idPrefix + "-" + value;
    return result;
}
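
The explicit double-checked locking in the example above predates Java 8; on newer JDKs the same per-prefix counter can be obtained more concisely with ConcurrentHashMap.computeIfAbsent. A condensed sketch (names are illustrative, not the TDDL source):

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicLong;

class IdGenerator {
    // name -> (idPrefix -> counter); computeIfAbsent creates each level atomically on first use
    private final ConcurrentHashMap<String, ConcurrentHashMap<String, AtomicLong>> idMap = new ConcurrentHashMap<>();

    String getId(String name, String idPrefix) {
        AtomicLong counter = idMap
                .computeIfAbsent(name, k -> new ConcurrentHashMap<>())
                .computeIfAbsent(idPrefix, k -> new AtomicLong(0));
        return idPrefix + "-" + counter.incrementAndGet();
    }
}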

From source file:org.jsfr.json.JacksonParserTest.java

@Test
public void testLargeJsonJackson() throws Exception {
    final AtomicLong counter = new AtomicLong();
    ObjectMapper om = new ObjectMapper();
    JsonFactory f = new JsonFactory();
    JsonParser jp = f.createParser(read("allthethings.json"));
    long start = System.currentTimeMillis();
    jp.nextToken();
    jp.nextToken();
    jp.nextToken();
    while (jp.nextToken() == JsonToken.FIELD_NAME) {
        if (jp.nextToken() == JsonToken.START_OBJECT) {
            TreeNode tree = om.readTree(jp);
            counter.incrementAndGet();
            LOGGER.trace("value: {}", tree);
        }
    }
    jp.close();
    LOGGER.info("Jackson processes {} value in {} millisecond", counter.get(),
            System.currentTimeMillis() - start);
}

From source file:com.pinterest.pinlater.client.PinLaterQueryIssuer.java

private void issueDequeueAckRequests(final PinLater.ServiceIface iface) throws InterruptedException {
    Preconditions.checkNotNull(queueName, "Queue was not specified.");
    final AtomicLong queriesIssued = new AtomicLong(0);
    final Semaphore permits = new Semaphore(concurrency);
    while (numQueries == -1 || queriesIssued.get() < numQueries) {
        final PinLaterDequeueRequest request = new PinLaterDequeueRequest();
        request.setQueueName(queueName);
        request.setLimit(batchSize);
        final long startTimeNanos = System.nanoTime();
        queriesIssued.incrementAndGet();
        permits.acquire();
        iface.dequeueJobs(REQUEST_CONTEXT, request)
                .flatMap(new Function<PinLaterDequeueResponse, Future<Void>>() {
                    @Override
                    public Future<Void> apply(PinLaterDequeueResponse response) {
                        if (response.getJobsSize() == 0) {
                            return Future.Void();
                        }

                        PinLaterJobAckRequest jobAckRequest = new PinLaterJobAckRequest(queueName);
                        for (String job : response.getJobs().keySet()) {
                            if (random.nextInt(100) < dequeueSuccessPercent) {
                                jobAckRequest.addToJobsSucceeded(new PinLaterJobAckInfo(job));
                            } else {
                                jobAckRequest.addToJobsFailed(new PinLaterJobAckInfo(job));
                            }
                        }
                        return iface.ackDequeuedJobs(REQUEST_CONTEXT, jobAckRequest);
                    }
                }).respond(new Function<Try<Void>, BoxedUnit>() {
                    @Override
                    public BoxedUnit apply(Try<Void> voidTry) {
                        permits.release();
                        statsLogger
                                .requestComplete(Duration.fromNanoseconds(System.nanoTime() - startTimeNanos));
                        if (voidTry.isThrow()) {
                            LOG.info("Exception for request: " + request + " : " + ((Throw) voidTry).e());
                        }
                        return BoxedUnit.UNIT;
                    }
                });
    }
    permits.acquire(concurrency);
    LOG.info("Dequeue/ack queries issued: " + queriesIssued);
}

From source file:com.facebook.presto.accumulo.index.Indexer.java

private void addIndexMutation(ByteBuffer row, ByteBuffer family, ColumnVisibility visibility,
        byte[] qualifier) {
    // Create the mutation and add it to the batch writer
    Mutation indexMutation = new Mutation(row.array());
    indexMutation.put(family.array(), qualifier, visibility, EMPTY_BYTES);
    try {
        indexWriter.addMutation(indexMutation);
    } catch (MutationsRejectedException e) {
        throw new PrestoException(UNEXPECTED_ACCUMULO_ERROR, "Index mutation rejected by server", e);
    }

    // Increment the cardinality metrics for this value of index
    // metrics is a mapping of row ID to column family
    MetricsKey key = new MetricsKey(row, family, visibility);
    AtomicLong count = metrics.get(key);
    if (count == null) {
        count = new AtomicLong(0);
        metrics.put(key, count);
    }

    count.incrementAndGet();
}

From source file:org.codice.ddf.spatial.ogc.csw.catalog.transformer.CswQueryResponseTransformerTest.java

@Test
public void testMarshalAcknowledgementWithFailedTransforms()
        throws WebApplicationException, IOException, JAXBException, CatalogTransformerException {

    GetRecordsType query = new GetRecordsType();
    query.setResultType(ResultType.RESULTS);
    query.setMaxRecords(BigInteger.valueOf(6));
    query.setStartPosition(BigInteger.valueOf(0));
    SourceResponse sourceResponse = createSourceResponse(query, 6);

    Map<String, Serializable> args = new HashMap<>();
    args.put(CswConstants.RESULT_TYPE_PARAMETER, ResultType.RESULTS);
    args.put(CswConstants.GET_RECORDS, query);

    PrintWriter printWriter = getSimplePrintWriter();
    MetacardTransformer mockMetacardTransformer = mock(MetacardTransformer.class);

    final AtomicLong atomicLong = new AtomicLong(0);
    when(mockMetacardTransformer.transform(any(Metacard.class), anyMap())).then(invocationOnMock -> {
        if (atomicLong.incrementAndGet() == 2) {
            throw new CatalogTransformerException("");
        }

        Metacard metacard = (Metacard) invocationOnMock.getArguments()[0];
        BinaryContentImpl bci = new BinaryContentImpl(IOUtils.toInputStream(metacard.getId() + ","),
                new MimeType("application/xml"));
        return bci;
    });

    when(mockPrintWriterProvider.build((Class<Metacard>) notNull())).thenReturn(printWriter);
    when(mockTransformerManager.getTransformerBySchema(anyString())).thenReturn(mockMetacardTransformer);

    CswQueryResponseTransformer cswQueryResponseTransformer = new CswQueryResponseTransformer(
            mockTransformerManager, mockPrintWriterProvider);
    cswQueryResponseTransformer.init();
    BinaryContent content = cswQueryResponseTransformer.transform(sourceResponse, args);
    cswQueryResponseTransformer.destroy();

    String xml = new String(content.getByteArray());
    assertThat(xml, containsString(CswQueryResponseTransformer.NUMBER_OF_RECORDS_MATCHED_ATTRIBUTE + " 6"));
    assertThat(xml, containsString(CswQueryResponseTransformer.NUMBER_OF_RECORDS_RETURNED_ATTRIBUTE + " 5"));
    assertThat(xml, containsString(CswQueryResponseTransformer.NEXT_RECORD_ATTRIBUTE + " 0"));
}

From source file:com.jivesoftware.os.amza.service.storage.binary.BinaryRowReaderWriterTest.java

@Test(enabled = false)
public void testConcurrency() throws Exception {
    MemoryBackedWALFiler walFiler = new MemoryBackedWALFiler(
            new MultiAutoGrowingByteBufferBackedFiler(32, 1_024 * 1_024, new HeapByteBufferFactory()));
    IoStats ioStats = new IoStats();
    BinaryRowReader binaryRowReader = new BinaryRowReader(walFiler);
    BinaryRowWriter binaryRowWriter = new BinaryRowWriter(walFiler);

    ExecutorService executors = Executors.newFixedThreadPool(9);
    AtomicBoolean running = new AtomicBoolean(true);
    AtomicLong scanned = new AtomicLong();
    List<Future<?>> futures = Lists.newArrayList();
    for (int i = 0; i < 8; i++) {
        futures.add(executors.submit(() -> {
            try {
                while (running.get()) {
                    binaryRowReader.scan(ioStats, 0, false, (rowFP, rowTxId, rowType, row) -> {
                        scanned.incrementAndGet();
                        return true;
                    });
                }
                return true;
            } catch (Throwable t) {
                t.printStackTrace();
                throw t;
            }
        }));
    }
    futures.add(executors.submit(() -> {
        try {
            for (int i = 0; i < 1_000_000; i++) {
                byte[] row = UIO.intBytes(i);
                binaryRowWriter.write(ioStats, i, RowType.primary, 1, 16, stream -> stream.stream(row),
                        stream -> true,
                        (txId, prefix, key, value, valueTimestamp, valueTombstoned, valueVersion, fp) -> true,
                        false, false);
                if (i % 10_000 == 0) {
                    System.out.println("Finished i:" + i + " scanned:" + scanned.get());
                }
            }
        } finally {
            running.set(false);
        }
        return null;
    }));

    for (Future<?> future : futures) {
        future.get();
    }
}

From source file:stroom.index.server.BenchmarkIndex.java

@Override
public void run() {
    init();

    final long batchStartTime = System.currentTimeMillis();

    final IndexShardWriterImpl[] writers = new IndexShardWriterImpl[indexShards.length];
    for (int i = 0; i < writers.length; i++) {
        final IndexShard indexShard = indexShards[i];
        writers[i] = new IndexShardWriterImpl(indexShardService, indexFields, indexShard.getIndex(),
                indexShard);
        writers[i].setRamBufferSizeMB(ramBufferMbSize);
        writers[i].open(true);
    }
    final AtomicLong atomicLong = new AtomicLong();

    final long indexStartTime = System.currentTimeMillis();

    final ExecutorService threadPoolExecutor = Executors.newFixedThreadPool(jobSize);
    for (int i = 0; i < jobSize; i++) {
        final Runnable r = () -> {
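            // each worker claims unique document IDs from the shared atomic counter until docCount is reached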
            long myId;
            while ((myId = atomicLong.incrementAndGet()) < docCount) {
                try {
                    final int idx = (int) (myId % writers.length);
                    writers[idx].addDocument(getDocument(myId));
                } catch (final Exception e) {
                    e.printStackTrace();
                }
            }
        };
        threadPoolExecutor.execute(r);
    }

    threadPoolExecutor.shutdown();

    // Wait for termination.
    while (!threadPoolExecutor.isTerminated()) {
        // Wait 1 second.
        ThreadUtil.sleep(1000);

        final long docsSoFar = atomicLong.get();
        final long secondsSoFar = (System.currentTimeMillis() - batchStartTime) / 1000;

        for (int i = 0; i < writers.length; i++) {
            final IndexShardWriterImpl impl = writers[i];
            final IndexShard indexShard = indexShards[i];

            if (secondsSoFar > 0) {
                final long docsPerSecond = docsSoFar / secondsSoFar;
                impl.sync();
                LOGGER.info("run() - " + StringUtils.rightPad(ModelStringUtil.formatCsv(docsSoFar), 10)
                        + " doc ps " + ModelStringUtil.formatCsv(docsPerSecond) + " ("
                        + indexShard.getFileSizeString() + ")");
            }
            if (nextCommit != null && docsSoFar > nextCommit) {
                impl.flush();
                nextCommit = ((docsSoFar / commitCount) * commitCount) + commitCount;
                LOGGER.info("run() - commit " + docsSoFar + " next commit is " + nextCommit);
            }
        }
    }
    final long indexEndTime = System.currentTimeMillis();
    final long secondsSoFar = (System.currentTimeMillis() - batchStartTime) / 1000;
    final long docsPerSecond = atomicLong.get() / secondsSoFar;

    for (final IndexShardWriter writer : writers) {
        writer.close();
    }

    final long batchEndTime = System.currentTimeMillis();

    LOGGER.info("runWrite() - Complete");
    LOGGER.info("=====================");
    LOGGER.info("");
    LOGGER.info("Using Args");
    LOGGER.info("==========");
    LoggerPrintStream traceStream = LoggerPrintStream.create(LOGGER, false);
    traceArguments(traceStream);
    traceStream.close();
    LOGGER.info("");
    LOGGER.info("Stats");
    LOGGER.info("=====");

    LOGGER.info("Open Time  " + toMsNiceString(indexStartTime - batchStartTime));
    LOGGER.info("Index Time " + toMsNiceString(indexEndTime - indexStartTime));
    LOGGER.info("Close Time " + toMsNiceString(batchEndTime - indexEndTime));
    LOGGER.info("Total Time " + toMsNiceString(batchEndTime - batchStartTime));
    LOGGER.info("");
    LOGGER.info("Final Docs PS " + ModelStringUtil.formatCsv(docsPerSecond));

    traceStream = LoggerPrintStream.create(LOGGER, false);
    for (int i = 0; i < writers.length; i++) {
        LOGGER.info("");
        final IndexShardWriterImpl impl = writers[i];
        LOGGER.info("Writer " + StringUtils.leftPad(String.valueOf(i), 2));
        LOGGER.info("=========");
        impl.trace(traceStream);
    }
    traceStream.close();

    LOGGER.info("");
    LOGGER.info("Search");
    LOGGER.info("=====");

    try {
        final IndexShardSearcherImpl[] reader = new IndexShardSearcherImpl[indexShards.length];
        final IndexReader[] readers = new IndexReader[indexShards.length];
        for (int i = 0; i < reader.length; i++) {
            reader[i] = new IndexShardSearcherImpl(indexShards[i]);
            reader[i].open();
            readers[i] = reader[i].getReader();
        }

        for (final String arg : docArgs) {
            doSearchOnField(readers, arg);
        }

        doSearchOnField(readers, "multifield");
        doSearchOnField(readers, "dupfield");

        LOGGER.info("=====");

        for (int i = 0; i < reader.length; i++) {
            reader[i].close();
        }

    } catch (final Exception ex) {
        ex.printStackTrace();
    }

}