Example usage for org.apache.commons.lang.mutable MutableLong increment

Introduction

On this page you can find example usages of org.apache.commons.lang.mutable.MutableLong.increment(), collected from open-source projects.

Prototype

public void increment() 

Document

Increments the value.
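
A minimal sketch of the call, assuming only commons-lang 2.x on the classpath:

import org.apache.commons.lang.mutable.MutableLong;

public class MutableLongDemo {
    public static void main(String[] args) {
        MutableLong counter = new MutableLong(0);
        counter.increment();                      // value is now 1
        counter.increment();                      // value is now 2
        counter.add(5);                           // add() bumps by an arbitrary amount: 7
        System.out.println(counter.longValue());  // prints 7
    }
}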

Usage

From source file:com.datatorrent.lib.io.jms.AbstractJMSInputOperator.java

/**
 * This method is called when a message is added to {@link #holdingBuffer} and can be overridden by subclasses
 * if required. This is called by the JMS thread not Operator thread.
 *
 * @param message the consumed message
 * @return true if the message is accepted; false if it is ignored as a redelivered duplicate.
 * @throws javax.jms.JMSException
 */
protected boolean messageConsumed(Message message) throws JMSException {
    if (message.getJMSRedelivered() && pendingAck.contains(message.getJMSMessageID())) {
        counters.getCounter(CounterKeys.REDELIVERED).increment();
        LOG.warn("IGNORING: Redelivered Message {}", message.getJMSMessageID());
        return false;
    }
    pendingAck.add(message.getJMSMessageID());
    MutableLong receivedCt = counters.getCounter(CounterKeys.RECEIVED);
    receivedCt.increment();
    LOG.debug("message id: {} buffer size: {} received: {}", message.getJMSMessageID(), holdingBuffer.size(),
            receivedCt.longValue());
    return true;
}
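
The counters field above hands out one MutableLong per enum key, so the hot path can increment without map churn. A minimal hedged stand-in for that registry pattern (the CounterRegistry class and names below are hypothetical, not the operator's actual counters type):

import java.util.EnumMap;
import org.apache.commons.lang.mutable.MutableLong;

public class CounterRegistryDemo {
    enum CounterKeys { RECEIVED, REDELIVERED }

    static final class CounterRegistry<K extends Enum<K>> {
        private final EnumMap<K, MutableLong> counters;

        CounterRegistry(Class<K> keyType) {
            counters = new EnumMap<>(keyType);
            for (K key : keyType.getEnumConstants()) {
                counters.put(key, new MutableLong(0));  // one counter per key, created up front
            }
        }

        MutableLong getCounter(K key) {
            return counters.get(key);
        }
    }

    public static void main(String[] args) {
        CounterRegistry<CounterKeys> counters = new CounterRegistry<>(CounterKeys.class);
        counters.getCounter(CounterKeys.RECEIVED).increment();
        System.out.println(counters.getCounter(CounterKeys.RECEIVED).longValue()); // prints 1
    }
}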

From source file:com.jivesoftware.os.amza.service.AmzaService.java

private boolean streamOnline(RingMember ringMember, VersionedPartitionName versionedPartitionName,
        long highestTransactionId, long leadershipToken, long limit, DataOutputStream dos, MutableLong bytes,
        HighwaterStorage highwaterStorage, PartitionStripe.RowStreamer streamer) throws Exception {

    ackWaters.set(ringMember, versionedPartitionName, highestTransactionId, leadershipToken);
    dos.writeLong(leadershipToken);
    dos.writeLong(versionedPartitionName.getPartitionVersion());
    dos.writeByte(1); // fully online
    bytes.increment();
    RingTopology ring = ringStoreReader.getRing(versionedPartitionName.getPartitionName().getRingName(), -1);
    for (int i = 0; i < ring.entries.size(); i++) {
        if (ring.rootMemberIndex != i) {
            RingMemberAndHost entry = ring.entries.get(i);
            long highwatermark = highwaterStorage.get(entry.ringMember, versionedPartitionName);
            byte[] ringMemberBytes = entry.ringMember.toBytes();
            dos.writeByte(1);
            dos.writeInt(ringMemberBytes.length);
            dos.write(ringMemberBytes);
            dos.writeLong(highwatermark);
            bytes.add(1 + 4 + ringMemberBytes.length + 8);
        }
    }

    dos.writeByte(0); // last entry marker
    bytes.increment();

    long[] limited = new long[1];
    long[] lastRowTxId = { -1 };
    boolean streamedToEnd = streamer.stream((rowFP, rowTxId, rowType, row) -> {
        if (limited[0] >= limit && lastRowTxId[0] < rowTxId) {
            return false;
        }
        lastRowTxId[0] = rowTxId;
        dos.writeByte(1);
        dos.writeLong(rowTxId);
        dos.writeByte(rowType.toByte());
        dos.writeInt(row.length);
        dos.write(row);
        bytes.add(1 + 8 + 1 + 4 + row.length);
        limited[0]++;
        return true;
    });

    dos.writeByte(0); // last entry marker
    bytes.increment();
    dos.writeByte(streamedToEnd ? 1 : 0); // streamedToEnd marker
    bytes.increment();
    return false;
}
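
The example above threads a MutableLong through the write path as an out-parameter byte counter: every write to the stream is mirrored by bytes.increment() or bytes.add(...), so the caller learns how many bytes were produced without a second return value. A minimal sketch of that pattern (the helper names and record layout below are hypothetical, not Amza's API):

import java.io.DataOutputStream;
import java.io.IOException;
import org.apache.commons.lang.mutable.MutableLong;

public class ByteCountingWriter {
    // Hypothetical helper: writes one length-prefixed record and keeps the
    // caller-supplied byte counter in step with the stream.
    static void writeRecord(DataOutputStream dos, byte[] payload, MutableLong bytes) throws IOException {
        dos.writeByte(1);                    // presence marker, 1 byte
        dos.writeInt(payload.length);        // length prefix, 4 bytes
        dos.write(payload);
        bytes.add(1 + 4 + payload.length);
    }

    static void writeEndMarker(DataOutputStream dos, MutableLong bytes) throws IOException {
        dos.writeByte(0);                    // last-entry marker
        bytes.increment();                   // exactly one byte written
    }
}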

From source file:eu.project.ttc.engines.morpho.CompostAE.java

@Override
public void collectionProcessComplete() throws AnalysisEngineProcessException {
    SubTaskObserver observer = observerResource.getTaskObserver(TASK_NAME);
    observer.setTotalTaskWork(termIndexResource.getTermIndex().getWords().size());
    LOGGER.info("Starting morphological compound detection for TermIndex {}",
            this.termIndexResource.getTermIndex().getName());
    LOGGER.debug(this.toString());
    wrMeasure = termIndexResource.getTermIndex().getWRMeasure();
    swtLemmaIndex = termIndexResource.getTermIndex().getCustomIndex(TermIndexes.SINGLE_WORD_LEMMA);
    buildCompostIndex();

    final MutableLong cnt = new MutableLong(0);

    Timer progressLoggerTimer = new Timer("Morphosyntactic splitter AE");
    progressLoggerTimer.schedule(new TimerTask() {
        @Override
        public void run() {
            int total = termIndexResource.getTermIndex().getWords().size();
            CompostAE.LOGGER.info("Progress: {}% ({} of {})",
                    String.format("%.2f", ((float) cnt.longValue() * 100) / total), cnt.longValue(), total);
        }
    }, 5000L, 5000L);

    int observingStep = 100;
    for (Term swt : termIndexResource.getTermIndex().getTerms()) {
        if (!swt.isSingleWord())
            continue;
        cnt.increment();
        if (cnt.longValue() % observingStep == 0) {
            observer.work(observingStep);
        }

        /*
         * Do not do native morphology splitting 
         * if a composition already exists.
         */
        Word word = swt.getWords().get(0).getWord();
        if (word.isCompound())
            continue;

        Map<Segmentation, Double> scores = computeScores(word.getLemma());
        if (scores.size() > 0) {

            List<Segmentation> segmentations = Lists.newArrayList(scores.keySet());

            /*
             *  compare segmentations in a deterministic way.
             */
            segmentations.sort(new Comparator<Segmentation>() {
                @Override
                public int compare(Segmentation o1, Segmentation o2) {
                    int comp = Double.compare(scores.get(o2), scores.get(o1));
                    if (comp != 0)
                        return comp;
                    comp = Integer.compare(o1.getSegments().size(), o2.getSegments().size());
                    if (comp != 0)
                        return comp;
                    for (int i = 0; i < o1.getSegments().size(); i++) {
                        comp = Integer.compare(o2.getSegments().get(i).getEnd(),
                                o1.getSegments().get(i).getEnd());
                        if (comp != 0)
                            return comp;
                    }
                    return 0;
                }
            });

            Segmentation bestSegmentation = segmentations.get(0);

            // build the word component from segmentation
            WordBuilder builder = new WordBuilder(word);

            for (Segment seg : bestSegmentation.getSegments()) {
                String lemma = segmentLemmaCache.getUnchecked(seg.getLemma());
                builder.addComponent(seg.getBegin(), seg.getEnd(), lemma);
                if (seg.isNeoclassical())
                    builder.setCompoundType(CompoundType.NEOCLASSICAL);
                else
                    builder.setCompoundType(CompoundType.NATIVE);
            }
            builder.create();

            // log the word composition
            if (LOGGER.isTraceEnabled()) {
                List<String> componentStrings = Lists.newArrayList();
                for (Component component : word.getComponents())
                    componentStrings.add(component.toString());
                LOGGER.trace("{} [{}]", word.getLemma(), Joiner.on(' ').join(componentStrings));
            }
        }
    }

    //finalize
    progressLoggerTimer.cancel();

    LOGGER.debug("segment score cache size: {}", segmentScoreEntries.size());
    LOGGER.debug("segment score hit count: " + segmentScoreEntries.stats().hitCount());
    LOGGER.debug("segment score hit rate: " + segmentScoreEntries.stats().hitRate());
    LOGGER.debug("segment score eviction count: " + segmentScoreEntries.stats().evictionCount());
    termIndexResource.getTermIndex().dropCustomIndex(TermIndexes.SINGLE_WORD_LEMMA);
    segmentScoreEntries.invalidateAll();
    segmentLemmaCache.invalidateAll();
}

From source file:com.palantir.atlasdb.schema.TableMigratorTest.java

@Test
public void testMigrationToDifferentKvs() {
    final String tableName = "table";
    final String namespacedTableName = "namespace." + tableName;
    TableDefinition definition = new TableDefinition() {
        {
            rowName();
            rowComponent("r", ValueType.BLOB);
            columns();
            column("c", "c", ValueType.BLOB);
        }
    };
    SimpleSchemaUpdater updater = SimpleSchemaUpdaterImpl.create(keyValueService, Namespace.DEFAULT_NAMESPACE);
    updater.addTable(tableName, definition);
    int maxValueSize = definition.getMaxValueSize();
    keyValueService.createTable(namespacedTableName, maxValueSize);
    keyValueService.putMetadataForTable(namespacedTableName, definition.toTableMetadata().persistToBytes());

    TableMappingService tableMap = StaticTableMappingService.create(keyValueService);
    final String shortTableName = tableMap
            .getShortTableName(TableReference.create(Namespace.create("namespace"), tableName));

    final Cell theCell = Cell.create(PtBytes.toBytes("r1"), PtBytes.toBytes("c"));
    final byte[] theValue = PtBytes.toBytes("v1");
    txManager.runTaskWithRetry(new TransactionTask<Void, RuntimeException>() {
        @Override
        public Void execute(Transaction t) {
            Map<Cell, byte[]> values = ImmutableMap.of(theCell, theValue);
            t.put("default." + tableName, values);
            t.put(namespacedTableName, values);
            return null;
        }
    });

    // migration doesn't use namespace mapping
    final InMemoryKeyValueService kvs2 = new InMemoryKeyValueService(false);
    final ConflictDetectionManager cdm2 = ConflictDetectionManagers.withoutConflictDetection(kvs2);
    final SweepStrategyManager ssm2 = SweepStrategyManagers.completelyConservative(kvs2);
    final TestTransactionManagerImpl txManager2 = new TestTransactionManagerImpl(kvs2, timestampService,
            lockClient, lockService, transactionService, cdm2, ssm2);
    SimpleSchemaUpdater updater2 = SimpleSchemaUpdaterImpl.create(kvs2, Namespace.DEFAULT_NAMESPACE);
    updater2.addTable(tableName, definition);
    kvs2.createTable(shortTableName, maxValueSize);
    kvs2.putMetadataForTable(shortTableName, definition.toTableMetadata().persistToBytes());

    GeneralTaskCheckpointer checkpointer = new GeneralTaskCheckpointer("checkpoint", kvs2, txManager2);
    // The namespaced table is migrated under the short name.
    for (final String name : Lists.newArrayList("default." + tableName, shortTableName)) {
        TransactionRangeMigrator rangeMigrator = new TransactionRangeMigratorBuilder().srcTable(name)
                .readTxManager(txManager).txManager(txManager2).checkpointer(checkpointer).build();
        TableMigratorBuilder builder = new TableMigratorBuilder().srcTable(name).partitions(1)
                .executor(PTExecutors.newSingleThreadExecutor()).checkpointer(checkpointer)
                .rangeMigrator(rangeMigrator);
        TableMigrator migrator = builder.build();
        migrator.migrate();
    }
    checkpointer.deleteCheckpoints();

    final KeyValueService verifyKvs = NamespaceMappingKeyValueService
            .create(TableRemappingKeyValueService.create(kvs2, tableMap));
    final ConflictDetectionManager verifyCdm = ConflictDetectionManagers.withoutConflictDetection(verifyKvs);
    final SweepStrategyManager verifySsm = SweepStrategyManagers.completelyConservative(verifyKvs);
    final TestTransactionManagerImpl verifyTxManager = new TestTransactionManagerImpl(verifyKvs,
            timestampService, lockClient, lockService, transactionService, verifyCdm, verifySsm);
    final MutableLong count = new MutableLong();
    for (final String name : Lists.newArrayList(tableName, namespacedTableName)) {
        verifyTxManager.runTaskReadOnly(new TransactionTask<Void, RuntimeException>() {
            @Override
            public Void execute(Transaction t) {
                BatchingVisitable<RowResult<byte[]>> bv = t.getRange(name, RangeRequest.all());
                bv.batchAccept(1000,
                        AbortingVisitors.batching(new AbortingVisitor<RowResult<byte[]>, RuntimeException>() {
                            @Override
                            public boolean visit(RowResult<byte[]> item) {
                                Iterable<Entry<Cell, byte[]>> cells = item.getCells();
                                Entry<Cell, byte[]> e = Iterables.getOnlyElement(cells);
                                Assert.assertEquals(theCell, e.getKey());
                                Assert.assertArrayEquals(theValue, e.getValue());
                                count.increment();
                                return true;
                            }
                        }));
                return null;
            }
        });
    }
    Assert.assertEquals(2L, count.longValue());
}

From source file:com.jivesoftware.os.rcvs.hbase094.HBaseRowColumnValueStore.java

private <K> boolean completeFutureCallbacks(Long maxCount, CallbackStream<K> callback, MutableLong gotCount,
        List<Future<K>> marshalFutures) throws InterruptedException, ExecutionException {

    for (Future<K> future : marshalFutures) {
        K marshalled = future.get();
        if (marshalled == null) {
            continue;
        }

        try {
            K returned = callback.callback(marshalled);
            if (marshalled != returned) {
                return true;
            }
            gotCount.increment();
            if (maxCount != null) {
                if (gotCount.longValue() >= maxCount) {
                    return true;
                }
            }
        } catch (Exception ex) {
            throw new CallbackStreamException(ex);
        }
    }
    return false;
}

From source file:org.apache.accumulo.core.client.impl.ConditionalWriterImpl.java

private void convertMutations(TabletServerMutations<QCMutation> mutations, Map<Long, CMK> cmidToCm,
        MutableLong cmid, Map<TKeyExtent, List<TConditionalMutation>> tmutations,
        CompressedIterators compressedIters) {

    for (Entry<KeyExtent, List<QCMutation>> entry : mutations.getMutations().entrySet()) {
        TKeyExtent tke = entry.getKey().toThrift();
        ArrayList<TConditionalMutation> tcondMutations = new ArrayList<TConditionalMutation>();

        List<QCMutation> condMutations = entry.getValue();

        for (QCMutation cm : condMutations) {
            TMutation tm = cm.toThrift();

            List<TCondition> conditions = convertConditions(cm, compressedIters);

            cmidToCm.put(cmid.longValue(), new CMK(entry.getKey(), cm));
            TConditionalMutation tcm = new TConditionalMutation(conditions, tm, cmid.longValue());
            cmid.increment();
            tcondMutations.add(tcm);
        }

        tmutations.put(tke, tcondMutations);
    }
}
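
Here cmid works as a shared ID allocator: the current value keys each mutation, and increment() advances it for the next one. A minimal sketch of the idiom (the assignId helper below is hypothetical):

import java.util.HashMap;
import java.util.Map;
import org.apache.commons.lang.mutable.MutableLong;

public class IdAllocator {
    // Hypothetical helper: registers an item under the current ID and advances
    // the shared counter, so successive calls hand out 0, 1, 2, ...
    static <T> long assignId(MutableLong nextId, Map<Long, T> registry, T item) {
        long id = nextId.longValue();
        registry.put(id, item);
        nextId.increment();
        return id;
    }

    public static void main(String[] args) {
        MutableLong nextId = new MutableLong();
        Map<Long, String> registry = new HashMap<>();
        assignId(nextId, registry, "first");   // id 0
        assignId(nextId, registry, "second");  // id 1
        System.out.println(registry);          // prints both entries under ids 0 and 1
    }
}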

From source file:org.apache.accumulo.core.client.summary.CountingSummarizer.java

@Override
public Collector collector(SummarizerConfiguration sc) {
    init(sc);
    return new Collector() {

        // The map used for computing the summary incrementally uses ByteSequence keys, which is
        // more efficient than converting each Key to a String. The
        // conversion to String is deferred until the summary is requested.

        private Map<K, MutableLong> counters = new HashMap<>();
        private long tooMany = 0;
        private long tooLong = 0;
        private long seen = 0;
        private long emitted = 0;
        private long deleted = 0;
        private Converter<K> converter = converter();
        private Function<K, String> encoder = encoder();
        private UnaryOperator<K> copier = copier();

        private void incrementCounter(K counter) {
            emitted++;

            MutableLong ml = counters.get(counter);
            if (ml == null) {
                if (counters.size() >= maxCounters) {
                    // no need to store this counter in the map and get() it... just use instance variable
                    tooMany++;
                } else {
                    // we have never seen this key before, so check if it's too long
                    if (encoder.apply(counter).length() >= maxCounterKeyLen) {
                        tooLong++;
                    } else {
                        counters.put(copier.apply(counter), new MutableLong(1));
                    }
                }
            } else {
                // incrementing the existing MutableLong in place avoids another put()
                ml.increment();
            }
        }

        @Override
        public void accept(Key k, Value v) {
            seen++;
            if (ignoreDeletes && k.isDeleted()) {
                deleted++;
            } else {
                converter.convert(k, v, this::incrementCounter);
            }
        }

        @Override
        public void summarize(StatisticConsumer sc) {
            StringBuilder sb = new StringBuilder(COUNTER_STAT_PREFIX);

            for (Entry<K, MutableLong> entry : counters.entrySet()) {
                sb.setLength(COUNTER_STAT_PREFIX.length());
                sb.append(encoder.apply(entry.getKey()));
                sc.accept(sb.toString(), entry.getValue().longValue());
            }

            sc.accept(TOO_MANY_STAT, tooMany);
            sc.accept(TOO_LONG_STAT, tooLong);
            sc.accept(EMITTED_STAT, emitted);
            sc.accept(SEEN_STAT, seen);
            sc.accept(DELETES_IGNORED_STAT, deleted);
        }
    };
}
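
As the comment beside ml.increment() notes, keeping a MutableLong in the map lets an existing counter be bumped in place: only the first sighting of a key pays for a put(), and no Long boxes are allocated per increment. A minimal sketch of that map-counter idiom, assuming nothing beyond the JDK and commons-lang:

import java.util.HashMap;
import java.util.Map;
import org.apache.commons.lang.mutable.MutableLong;

public class OccurrenceCounter {
    // Count occurrences of each key; existing counters are incremented in place.
    static Map<String, MutableLong> countOccurrences(Iterable<String> keys) {
        Map<String, MutableLong> counts = new HashMap<>();
        for (String key : keys) {
            MutableLong count = counts.get(key);
            if (count == null) {
                counts.put(key, new MutableLong(1));   // first sighting: one put()
            } else {
                count.increment();                     // no boxing, no second put()
            }
        }
        return counts;
    }
}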

From source file:org.apache.accumulo.core.clientImpl.ConditionalWriterImpl.java

private void convertMutations(TabletServerMutations<QCMutation> mutations, Map<Long, CMK> cmidToCm,
        MutableLong cmid, Map<TKeyExtent, List<TConditionalMutation>> tmutations,
        CompressedIterators compressedIters) {

    for (Entry<KeyExtent, List<QCMutation>> entry : mutations.getMutations().entrySet()) {
        TKeyExtent tke = entry.getKey().toThrift();
        ArrayList<TConditionalMutation> tcondMutations = new ArrayList<>();

        List<QCMutation> condMutations = entry.getValue();

        for (QCMutation cm : condMutations) {
            TMutation tm = cm.toThrift();

            List<TCondition> conditions = convertConditions(cm, compressedIters);

            cmidToCm.put(cmid.longValue(), new CMK(entry.getKey(), cm));
            TConditionalMutation tcm = new TConditionalMutation(conditions, tm, cmid.longValue());
            cmid.increment();
            tcondMutations.add(tcm);
        }

        tmutations.put(tke, tcondMutations);
    }
}

From source file:org.apache.bookkeeper.bookie.InterleavedLedgerStorage.java

@Override
public List<DetectedInconsistency> localConsistencyCheck(Optional<RateLimiter> rateLimiter) throws IOException {
    long checkStart = MathUtils.nowInNano();
    LOG.info("Starting localConsistencyCheck");
    long checkedLedgers = 0;
    long checkedPages = 0;
    final MutableLong checkedEntries = new MutableLong(0);
    final MutableLong pageRetries = new MutableLong(0);
    NavigableMap<Long, Boolean> bkActiveLedgersSnapshot = activeLedgers.snapshot();
    final List<DetectedInconsistency> errors = new ArrayList<>();
    for (Long ledger : bkActiveLedgersSnapshot.keySet()) {
        try (LedgerCache.PageEntriesIterable pages = ledgerCache.listEntries(ledger)) {
            for (LedgerCache.PageEntries page : pages) {
                @Cleanup
                LedgerEntryPage lep = page.getLEP();
                MutableBoolean retry = new MutableBoolean(false);
                do {
                    retry.setValue(false);
                    int version = lep.getVersion();

                    MutableBoolean success = new MutableBoolean(true);
                    long start = MathUtils.nowInNano();
                    lep.getEntries((entry, offset) -> {
                        rateLimiter.ifPresent(RateLimiter::acquire);

                        try {
                            entryLogger.checkEntry(ledger, entry, offset);
                            checkedEntries.increment();
                        } catch (EntryLogger.EntryLookupException e) {
                            if (version != lep.getVersion()) {
                                pageRetries.increment();
                                if (lep.isDeleted()) {
                                    LOG.debug("localConsistencyCheck: ledger {} deleted", ledger);
                                } else {
                                    LOG.debug("localConsistencyCheck: concurrent modification, retrying");
                                    retry.setValue(true);
                                    retryCounter.inc();
                                }
                                return false;
                            } else {
                                errors.add(new DetectedInconsistency(ledger, entry, e));
                                LOG.error("Got error: ", e);
                            }
                            success.setValue(false);
                        }
                        return true;
                    });

                    if (success.booleanValue()) {
                        pageScanStats.registerSuccessfulEvent(MathUtils.elapsedNanos(start),
                                TimeUnit.NANOSECONDS);
                    } else {
                        pageScanStats.registerFailedEvent(MathUtils.elapsedNanos(start), TimeUnit.NANOSECONDS);
                    }
                } while (retry.booleanValue());
                checkedPages++;
            }
        } catch (NoLedgerException | FileInfo.FileInfoDeletedException e) {
            if (activeLedgers.containsKey(ledger)) {
                LOG.error("Cannot find ledger {}, should exist, exception is ", ledger, e);
                errors.add(new DetectedInconsistency(ledger, -1, e));
            } else {
                LOG.debug("ledger {} deleted since snapshot taken", ledger);
            }
        } catch (Exception e) {
            throw new IOException("Got other exception in localConsistencyCheck", e);
        }
        checkedLedgers++;
    }
    LOG.info(
            "Finished localConsistencyCheck, took {}s to scan {} ledgers, {} pages, "
                    + "{} entries with {} retries, {} errors",
            TimeUnit.NANOSECONDS.toSeconds(MathUtils.elapsedNanos(checkStart)), checkedLedgers, checkedPages,
            checkedEntries.longValue(), pageRetries.longValue(), errors.size());

    return errors;
}
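
Note why checkedEntries and pageRetries are MutableLongs rather than plain longs: locals captured by the getEntries callback must be effectively final, so a bare long could not be incremented inside it, while a final MutableLong reference can be, since only its contents change. A minimal sketch of that closure idiom:

import java.util.Arrays;
import java.util.List;
import org.apache.commons.lang.mutable.MutableLong;

public class LambdaCounter {
    // A bare long here would not compile ("local variables referenced from a
    // lambda expression must be final or effectively final"); the MutableLong
    // reference stays final while its contents are mutated.
    static long countMatching(List<String> items, String needle) {
        final MutableLong hits = new MutableLong(0);
        items.forEach(item -> {
            if (item.contains(needle)) {
                hits.increment();
            }
        });
        return hits.longValue();
    }

    public static void main(String[] args) {
        System.out.println(countMatching(Arrays.asList("ab", "bc", "cd"), "b")); // prints 2
    }
}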

From source file:org.apache.bookkeeper.bookie.storage.ldb.SingleDirectoryDbLedgerStorage.java

/**
 * Add an already existing ledger to the index.
 *
 * <p>This method is only used as a tool to help the migration from InterleavedLedgerStorage to DbLedgerStorage.
 *
 * @param ledgerId
 *            the ledger id
 * @param pages
 *            Iterator over index pages from Indexed
 * @return the number of entries added to the index
 */
public long addLedgerToIndex(long ledgerId, boolean isFenced, byte[] masterKey,
        LedgerCache.PageEntriesIterable pages) throws Exception {
    LedgerData ledgerData = LedgerData.newBuilder().setExists(true).setFenced(isFenced)
            .setMasterKey(ByteString.copyFrom(masterKey)).build();
    ledgerIndex.set(ledgerId, ledgerData);
    MutableLong numberOfEntries = new MutableLong();

    // Iterate over all the entries pages
    Batch batch = entryLocationIndex.newBatch();
    for (LedgerCache.PageEntries page : pages) {
        try (LedgerEntryPage lep = page.getLEP()) {
            lep.getEntries((entryId, location) -> {
                entryLocationIndex.addLocation(batch, ledgerId, entryId, location);
                numberOfEntries.increment();
                return true;
            });
        }
    }

    batch.flush();
    batch.close();

    return numberOfEntries.longValue();
}