Example usage for com.google.common.collect Range upperEndpoint

Introduction

On this page you can find example usage for com.google.common.collect Range upperEndpoint.

Prototype

public C upperEndpoint() 

Document

Returns the upper endpoint of this range.
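
A minimal sketch of the accessor, assuming only Guava on the classpath: upperEndpoint() returns the bound value whether the bound is open or closed, and throws IllegalStateException for a range without an upper bound, so unbounded ranges should be guarded with hasUpperBound().

import com.google.common.collect.Range;

public class UpperEndpointDemo {
    public static void main(String[] args) {
        Range<Integer> closed = Range.closed(3, 10);        // [3..10]
        Range<Integer> halfOpen = Range.closedOpen(3, 10);  // [3..10)

        System.out.println(closed.upperEndpoint());     // 10
        System.out.println(closed.upperBoundType());    // CLOSED
        System.out.println(halfOpen.upperEndpoint());   // 10 (the endpoint value, even though it is excluded)
        System.out.println(halfOpen.upperBoundType());  // OPEN

        Range<Integer> unbounded = Range.atLeast(3);     // [3..+inf)
        if (unbounded.hasUpperBound()) {
            System.out.println(unbounded.upperEndpoint());
        } else {
            System.out.println("no upper bound");        // upperEndpoint() would throw IllegalStateException
        }
    }
}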

Usage

From source file:org.apache.bookkeeper.mledger.impl.ManagedLedgerImpl.java

/**
 * Get the number of entries between a contiguous range of two positions
 *
 * @param range
 *            the position range
 * @return the count of entries
 */
long getNumberOfEntries(Range<PositionImpl> range) {
    PositionImpl fromPosition = range.lowerEndpoint();
    boolean fromIncluded = range.lowerBoundType() == BoundType.CLOSED;
    PositionImpl toPosition = range.upperEndpoint();
    boolean toIncluded = range.upperBoundType() == BoundType.CLOSED;

    if (fromPosition.getLedgerId() == toPosition.getLedgerId()) {
        // If the 2 positions are in the same ledger
        long count = toPosition.getEntryId() - fromPosition.getEntryId() - 1;
        count += fromIncluded ? 1 : 0;
        count += toIncluded ? 1 : 0;
        return count;
    } else {
        long count = 0;
        // If the from & to are pointing to different ledgers, then we need to :
        // 1. Add the entries in the ledger pointed by toPosition
        count += toPosition.getEntryId();
        count += toIncluded ? 1 : 0;

        // 2. Add the entries in the ledger pointed by fromPosition
        LedgerInfo li = ledgers.get(fromPosition.getLedgerId());
        if (li != null) {
            count += li.getEntries() - (fromPosition.getEntryId() + 1);
            count += fromIncluded ? 1 : 0;
        }

        // 3. Add the entries of the whole ledgers in between
        for (LedgerInfo ls : ledgers.subMap(fromPosition.getLedgerId(), false, toPosition.getLedgerId(), false)
                .values()) {
            count += ls.getEntries();
        }

        return count;
    }
}
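
The fromIncluded/toIncluded checks above map directly onto Guava's bound accessors. As a rough standalone illustration with plain integers (not part of the BookKeeper source), the same open/closed accounting looks like this:

import com.google.common.collect.BoundType;
import com.google.common.collect.Range;

public class BoundTypeCountSketch {
    // Count the integers covered by a bounded range, honoring open/closed bounds.
    static long countIntegers(Range<Integer> range) {
        boolean fromIncluded = range.lowerBoundType() == BoundType.CLOSED;
        boolean toIncluded = range.upperBoundType() == BoundType.CLOSED;
        long count = range.upperEndpoint() - range.lowerEndpoint() - 1; // strictly in between
        count += fromIncluded ? 1 : 0;
        count += toIncluded ? 1 : 0;
        return count;
    }

    public static void main(String[] args) {
        System.out.println(countIntegers(Range.openClosed(5, 10))); // 5 -> {6, 7, 8, 9, 10}
        System.out.println(countIntegers(Range.closed(5, 10)));     // 6 -> {5, 6, 7, 8, 9, 10}
    }
}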

From source file:org.apache.bookkeeper.mledger.impl.ManagedCursorImpl.java

/**
 *
 * @param newMarkDeletePosition
 *            the new acknowledged position
 * @return the previous acknowledged position
 */
PositionImpl setAcknowledgedPosition(PositionImpl newMarkDeletePosition) {
    if (newMarkDeletePosition.compareTo(markDeletePosition) < 0) {
        throw new IllegalArgumentException("Mark deleting an already mark-deleted position");
    }

    if (readPosition.compareTo(newMarkDeletePosition) <= 0) {
        // If the position that is mark-deleted is past the read position, it
        // means that the client has skipped some entries. We need to move
        // read position forward
        PositionImpl oldReadPosition = readPosition;
        readPosition = ledger.getNextValidPosition(newMarkDeletePosition);

        if (log.isDebugEnabled()) {
            log.debug("Moved read position from: {} to: {}", oldReadPosition, readPosition);
        }
    }

    PositionImpl oldMarkDeletePosition = markDeletePosition;

    if (!newMarkDeletePosition.equals(oldMarkDeletePosition)) {
        long skippedEntries = 0;
        if (newMarkDeletePosition.getLedgerId() == oldMarkDeletePosition.getLedgerId()
                && newMarkDeletePosition.getEntryId() == oldMarkDeletePosition.getEntryId() + 1) {
            // Mark-deleting the position next to current one
            skippedEntries = individualDeletedMessages.contains(newMarkDeletePosition) ? 0 : 1;
        } else {
            skippedEntries = getNumberOfEntries(Range.openClosed(oldMarkDeletePosition, newMarkDeletePosition));
        }
        PositionImpl positionAfterNewMarkDelete = ledger.getNextValidPosition(newMarkDeletePosition);
        if (individualDeletedMessages.contains(positionAfterNewMarkDelete)) {
            Range<PositionImpl> rangeToBeMarkDeleted = individualDeletedMessages
                    .rangeContaining(positionAfterNewMarkDelete);
            newMarkDeletePosition = rangeToBeMarkDeleted.upperEndpoint();
        }

        if (log.isDebugEnabled()) {
            log.debug("Moved ack position from: {} to: {} -- skipped: {}", oldMarkDeletePosition,
                    newMarkDeletePosition, skippedEntries);
        }
        messagesConsumedCounter += skippedEntries;
    }

    // Advance the markDelete-position and clear out deletedMsgSet
    markDeletePosition = PositionImpl.get(newMarkDeletePosition);
    individualDeletedMessages.remove(Range.atMost(markDeletePosition));

    return newMarkDeletePosition;
}
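
The rangeContaining(...).upperEndpoint() step above is what lets the mark-delete position jump across a block of already-deleted entries. A minimal standalone sketch with a Guava RangeSet over plain integers (PositionImpl replaced by Integer purely for illustration):

import com.google.common.collect.Range;
import com.google.common.collect.RangeSet;
import com.google.common.collect.TreeRangeSet;

public class MarkDeleteSketch {
    public static void main(String[] args) {
        RangeSet<Integer> individuallyDeleted = TreeRangeSet.create();
        individuallyDeleted.add(Range.openClosed(5, 8)); // entries 6..8 already acknowledged individually

        int newMarkDelete = 5;
        int positionAfter = newMarkDelete + 1;
        if (individuallyDeleted.contains(positionAfter)) {
            // Jump to the end of the contiguous deleted block
            newMarkDelete = individuallyDeleted.rangeContaining(positionAfter).upperEndpoint();
        }
        System.out.println(newMarkDelete); // 8

        // Everything at or below the mark-delete position can now be dropped from the set
        individuallyDeleted.remove(Range.atMost(newMarkDelete));
        System.out.println(individuallyDeleted.isEmpty()); // true
    }
}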

From source file:edu.mit.streamjit.impl.compiler2.SubsetBiasAverageAllocationStrategy.java

@Override
public void allocateGroup(ActorGroup group, Range<Integer> iterations, List<Core> cores, Configuration config) {
    int numCores = 0, biasCount = 0;
    List<ImmutableList<? extends Integer>> coreOrders = new ArrayList<>();
    float bias = 0;
    for (Actor a : group.actors()) {
        int id = a.id();
        numCores += config.getParameter("Group" + id + "CoreCount", Configuration.IntParameter.class)
                .getValue();
        Configuration.PermutationParameter<Integer> coreOrderParam = config.getParameter(
                "Group" + id + "CoreOrder", Configuration.PermutationParameter.class, Integer.class);
        coreOrders.add(coreOrderParam.getUniverse());
        int ourBiasCount = config.getParameter("Group" + id + "BiasCount", Configuration.IntParameter.class)
                .getValue();
        biasCount += Math.min(ourBiasCount, numCores - 1);
        bias += config.getParameter("Group" + id + "Bias", Configuration.FloatParameter.class).getValue();
    }
    numCores = IntMath.divide(numCores, group.actors().size(), RoundingMode.CEILING);
    biasCount = IntMath.divide(biasCount, group.actors().size(), RoundingMode.FLOOR);
    bias /= group.actors().size();
    //Transpose coreOrders.
    List<Integer> coreOrder = new ArrayList<>();
    for (int i = 0; i < coreOrders.get(0).size(); ++i)
        for (int j = 0; j < coreOrders.size(); ++j)
            coreOrder.add(coreOrders.get(j).get(i));
    //Remove duplicates preserving order.
    coreOrder = new ArrayList<>(new LinkedHashSet<>(coreOrder));

    List<Core> subset = new ArrayList<>(numCores);
    for (int i = 0; i < coreOrder.size() && subset.size() < numCores; ++i)
        if (coreOrder.get(i) < cores.size())
            subset.add(cores.get(coreOrder.get(i)));
    List<Core> biasSubset = new ArrayList<>(biasCount);
    while (biasSubset.size() < biasCount)
        biasSubset.add(subset.remove(0));

    float deficitFraction = biasCount * (1 - bias) / numCores, surplusFraction = 1 - deficitFraction;
    assert deficitFraction >= 0 && surplusFraction >= 0 : String.format("%d %d %f -> %f %f", numCores,
            biasCount, bias, deficitFraction, surplusFraction);
    iterations = iterations.canonical(DiscreteDomain.integers());
    int totalIterations = iterations.upperEndpoint() - iterations.lowerEndpoint();
    int biasIterations = (int) (totalIterations * deficitFraction);
    //We pass a null config to ensure we don't interfere with the other strategy.
    if (biasCount > 0)
        new FullDataParallelAllocationStrategy(biasCount).allocateGroup(group,
                Range.closedOpen(iterations.lowerEndpoint(), iterations.lowerEndpoint() + biasIterations),
                biasSubset, null);
    if (numCores - biasCount > 0)
        new FullDataParallelAllocationStrategy(numCores - biasCount).allocateGroup(group,
                Range.closedOpen(iterations.lowerEndpoint() + biasIterations, iterations.upperEndpoint()),
                subset, null);
}
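
The canonical(DiscreteDomain.integers()) call above normalizes the iteration range to the half-open [lower..upper) form, so upperEndpoint() - lowerEndpoint() is exactly the iteration count. A small illustrative check (not from the StreamJIT source):

import com.google.common.collect.DiscreteDomain;
import com.google.common.collect.Range;

public class CanonicalRangeDemo {
    public static void main(String[] args) {
        Range<Integer> iterations = Range.closed(0, 9); // 10 iterations
        Range<Integer> canonical = iterations.canonical(DiscreteDomain.integers());

        System.out.println(canonical);                                              // [0..10)
        System.out.println(canonical.upperEndpoint() - canonical.lowerEndpoint());  // 10
    }
}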

From source file:org.apache.kylin.storage.hbase.ii.coprocessor.endpoint.IIEndpoint.java

private Scan prepareScan(IIProtos.IIRequest request, HRegion region) throws IOException {
    Scan scan = new Scan();

    scan.addColumn(IIDesc.HBASE_FAMILY_BYTES, IIDesc.HBASE_QUALIFIER_BYTES);
    scan.addColumn(IIDesc.HBASE_FAMILY_BYTES, IIDesc.HBASE_DICTIONARY_BYTES);

    if (request.hasTsRange()) {
        Range<Long> tsRange = (Range<Long>) SerializationUtils
                .deserialize(HBaseZeroCopyByteString.zeroCopyGetBytes(request.getTsRange()));
        byte[] regionStartKey = region.getStartKey();
        if (!ArrayUtils.isEmpty(regionStartKey)) {
            shard = BytesUtil.readUnsigned(regionStartKey, 0, IIKeyValueCodec.SHARD_LEN);
        } else {
            shard = 0;
        }
        logger.info("Start key of the region is: " + BytesUtil.toReadableText(regionStartKey)
                + ", making shard to be :" + shard);

        if (tsRange.hasLowerBound()) {
            // differentiating GT and GTE does not seem very beneficial
            Preconditions.checkArgument(shard != -1, "Shard is -1!");
            long tsStart = tsRange.lowerEndpoint();
            logger.info("ts start is " + tsStart);

            byte[] idealStartKey = new byte[IIKeyValueCodec.SHARD_LEN + IIKeyValueCodec.TIMEPART_LEN];
            BytesUtil.writeUnsigned(shard, idealStartKey, 0, IIKeyValueCodec.SHARD_LEN);
            BytesUtil.writeLong(tsStart, idealStartKey, IIKeyValueCodec.SHARD_LEN,
                    IIKeyValueCodec.TIMEPART_LEN);
            logger.info("ideaStartKey is(readable) :" + BytesUtil.toReadableText(idealStartKey));
            Result result = region.getClosestRowBefore(idealStartKey, IIDesc.HBASE_FAMILY_BYTES);
            if (result != null) {
                byte[] actualStartKey = Arrays.copyOf(result.getRow(),
                        IIKeyValueCodec.SHARD_LEN + IIKeyValueCodec.TIMEPART_LEN);
                scan.setStartRow(actualStartKey);
                logger.info("The start key is set to " + BytesUtil.toReadableText(actualStartKey));
            } else {
                logger.info("There is no key before ideaStartKey so ignore tsStart");
            }
        }

        if (tsRange.hasUpperBound()) {
            // differentiating LT and LTE does not seem very beneficial
            Preconditions.checkArgument(shard != -1, "Shard is -1");
            long tsEnd = tsRange.upperEndpoint();
            logger.info("ts end is " + tsEnd);

            byte[] actualEndKey = new byte[IIKeyValueCodec.SHARD_LEN + IIKeyValueCodec.TIMEPART_LEN];
            BytesUtil.writeUnsigned(shard, actualEndKey, 0, IIKeyValueCodec.SHARD_LEN);
            BytesUtil.writeLong(tsEnd + 1, actualEndKey, IIKeyValueCodec.SHARD_LEN,
                    IIKeyValueCodec.TIMEPART_LEN);//notice +1 here
            scan.setStopRow(actualEndKey);
            logger.info("The stop key is set to " + BytesUtil.toReadableText(actualEndKey));
        }
    }

    return scan;
}
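
The tsEnd + 1 above compensates for the scan's stop row being exclusive while the upper endpoint of the timestamp range is treated as inclusive. A condensed sketch of just that mapping, using plain longs instead of the Kylin key codec (names and values here are illustrative assumptions):

import com.google.common.collect.Range;

public class ScanBoundsSketch {
    public static void main(String[] args) {
        Range<Long> tsRange = Range.closed(1_000L, 2_000L);

        long startTs = tsRange.hasLowerBound() ? tsRange.lowerEndpoint() : Long.MIN_VALUE;
        // Stop boundary is exclusive, so add 1 to keep the upper endpoint itself inside the scan
        long stopTs = tsRange.hasUpperBound() ? tsRange.upperEndpoint() + 1 : Long.MAX_VALUE;

        System.out.println(startTs + " (inclusive) .. " + stopTs + " (exclusive)");
    }
}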

From source file:org.pshdl.model.extensions.RangeExtension.java

protected Optional<Range<BigInteger>> _determineRange(final HDLArithOp obj,
        final HDLEvaluationContext context) {
    HDLExpression _left = obj.getLeft();
    final Optional<Range<BigInteger>> leftRange = this.determineRange(_left, context);
    boolean _isPresent = leftRange.isPresent();
    boolean _not = (!_isPresent);
    if (_not) {
        return Optional.<Range<BigInteger>>absent();
    }
    final Range<BigInteger> lrVal = leftRange.get();
    boolean _or = false;
    boolean _hasLowerBound = lrVal.hasLowerBound();
    boolean _not_1 = (!_hasLowerBound);
    if (_not_1) {
        _or = true;
    } else {
        boolean _hasUpperBound = lrVal.hasUpperBound();
        boolean _not_2 = (!_hasUpperBound);
        _or = _not_2;
    }
    if (_or) {
        return Optional.<Range<BigInteger>>absent();
    }
    HDLExpression _right = obj.getRight();
    final Optional<Range<BigInteger>> rightRange = this.determineRange(_right, context);
    boolean _isPresent_1 = rightRange.isPresent();
    boolean _not_3 = (!_isPresent_1);
    if (_not_3) {
        return Optional.<Range<BigInteger>>absent();
    }
    final Range<BigInteger> rrVal = rightRange.get();
    boolean _or_1 = false;
    boolean _hasLowerBound_1 = rrVal.hasLowerBound();
    boolean _not_4 = (!_hasLowerBound_1);
    if (_not_4) {
        _or_1 = true;
    } else {
        boolean _hasUpperBound_1 = rrVal.hasUpperBound();
        boolean _not_5 = (!_hasUpperBound_1);
        _or_1 = _not_5;
    }
    if (_or_1) {
        return Optional.<Range<BigInteger>>absent();
    }
    HDLArithOp.HDLArithOpType _type = obj.getType();
    if (_type != null) {
        switch (_type) {
        case PLUS:
            BigInteger _lowerEndpoint = lrVal.lowerEndpoint();
            BigInteger _lowerEndpoint_1 = rrVal.lowerEndpoint();
            BigInteger _add = _lowerEndpoint.add(_lowerEndpoint_1);
            BigInteger _upperEndpoint = lrVal.upperEndpoint();
            BigInteger _upperEndpoint_1 = rrVal.upperEndpoint();
            BigInteger _add_1 = _upperEndpoint.add(_upperEndpoint_1);
            Range<BigInteger> _createRange = RangeTool.<BigInteger>createRange(_add, _add_1);
            return Optional.<Range<BigInteger>>of(_createRange);
        case MINUS:
            BigInteger _lowerEndpoint_2 = lrVal.lowerEndpoint();
            BigInteger _lowerEndpoint_3 = rrVal.lowerEndpoint();
            BigInteger _subtract = _lowerEndpoint_2.subtract(_lowerEndpoint_3);
            BigInteger _upperEndpoint_2 = lrVal.upperEndpoint();
            BigInteger _upperEndpoint_3 = rrVal.upperEndpoint();
            BigInteger _subtract_1 = _upperEndpoint_2.subtract(_upperEndpoint_3);
            Range<BigInteger> _createRange_1 = RangeTool.<BigInteger>createRange(_subtract, _subtract_1);
            return Optional.<Range<BigInteger>>of(_createRange_1);
        case DIV:
            boolean _or_2 = false;
            BigInteger _lowerEndpoint_4 = rrVal.lowerEndpoint();
            boolean _equals = _lowerEndpoint_4.equals(BigInteger.ZERO);
            if (_equals) {
                _or_2 = true;
            } else {
                BigInteger _upperEndpoint_4 = rrVal.upperEndpoint();
                boolean _equals_1 = _upperEndpoint_4.equals(BigInteger.ZERO);
                _or_2 = _equals_1;
            }
            if (_or_2) {
                obj.<IHDLObject>addMeta(ProblemDescription.SOURCE, obj);
                obj.<ProblemDescription>addMeta(ProblemDescription.DESCRIPTION, ProblemDescription.ZERO_DIVIDE);
                return Optional.<Range<BigInteger>>absent();
            }
            boolean _or_3 = false;
            BigInteger _lowerEndpoint_5 = rrVal.lowerEndpoint();
            int _signum = _lowerEndpoint_5.signum();
            BigInteger _upperEndpoint_5 = rrVal.upperEndpoint();
            int _signum_1 = _upperEndpoint_5.signum();
            int _multiply = (_signum * _signum_1);
            boolean _lessThan = (_multiply < 0);
            if (_lessThan) {
                _or_3 = true;
            } else {
                BigInteger _upperEndpoint_6 = rrVal.upperEndpoint();
                int _signum_2 = _upperEndpoint_6.signum();
                boolean _equals_2 = (_signum_2 == 0);
                _or_3 = _equals_2;
            }
            if (_or_3) {
                obj.<IHDLObject>addMeta(ProblemDescription.SOURCE, obj);
                obj.<ProblemDescription>addMeta(ProblemDescription.DESCRIPTION,
                        ProblemDescription.POSSIBLY_ZERO_DIVIDE);
            }
            BigInteger _lowerEndpoint_6 = rrVal.lowerEndpoint();
            BigDecimal _bigDecimal = new BigDecimal(_lowerEndpoint_6);
            BigDecimal _divide = BigDecimal.ONE.divide(_bigDecimal);
            BigInteger _upperEndpoint_7 = rrVal.upperEndpoint();
            BigDecimal _bigDecimal_1 = new BigDecimal(_upperEndpoint_7);
            BigDecimal _divide_1 = BigDecimal.ONE.divide(_bigDecimal_1);
            final Range<BigDecimal> mulRange = RangeTool.<BigDecimal>createRange(_divide, _divide_1);
            BigInteger _lowerEndpoint_7 = lrVal.lowerEndpoint();
            BigDecimal _bigDecimal_2 = new BigDecimal(_lowerEndpoint_7);
            BigDecimal _lowerEndpoint_8 = mulRange.lowerEndpoint();
            final BigDecimal ff = _bigDecimal_2.multiply(_lowerEndpoint_8);
            BigInteger _lowerEndpoint_9 = lrVal.lowerEndpoint();
            BigDecimal _bigDecimal_3 = new BigDecimal(_lowerEndpoint_9);
            BigDecimal _upperEndpoint_8 = mulRange.upperEndpoint();
            final BigDecimal ft = _bigDecimal_3.multiply(_upperEndpoint_8);
            BigInteger _upperEndpoint_9 = lrVal.upperEndpoint();
            BigDecimal _bigDecimal_4 = new BigDecimal(_upperEndpoint_9);
            BigDecimal _lowerEndpoint_10 = mulRange.lowerEndpoint();
            final BigDecimal tf = _bigDecimal_4.multiply(_lowerEndpoint_10);
            BigInteger _upperEndpoint_10 = lrVal.upperEndpoint();
            BigDecimal _bigDecimal_5 = new BigDecimal(_upperEndpoint_10);
            BigDecimal _upperEndpoint_11 = mulRange.upperEndpoint();
            final BigDecimal tt = _bigDecimal_5.multiply(_upperEndpoint_11);
            BigDecimal _min = ff.min(ft);
            BigDecimal _min_1 = _min.min(tf);
            BigDecimal _min_2 = _min_1.min(tt);
            BigInteger _bigInteger = _min_2.toBigInteger();
            BigDecimal _max = ff.max(ft);
            BigDecimal _max_1 = _max.max(tf);
            BigDecimal _max_2 = _max_1.max(tt);
            BigInteger _bigInteger_1 = _max_2.toBigInteger();
            Range<BigInteger> _createRange_2 = RangeTool.<BigInteger>createRange(_bigInteger, _bigInteger_1);
            return Optional.<Range<BigInteger>>of(_createRange_2);
        case MUL:
            BigInteger _lowerEndpoint_11 = lrVal.lowerEndpoint();
            BigInteger _lowerEndpoint_12 = rrVal.lowerEndpoint();
            final BigInteger ff_1 = _lowerEndpoint_11.multiply(_lowerEndpoint_12);
            BigInteger _lowerEndpoint_13 = lrVal.lowerEndpoint();
            BigInteger _upperEndpoint_12 = rrVal.upperEndpoint();
            final BigInteger ft_1 = _lowerEndpoint_13.multiply(_upperEndpoint_12);
            BigInteger _upperEndpoint_13 = lrVal.upperEndpoint();
            BigInteger _lowerEndpoint_14 = rrVal.lowerEndpoint();
            final BigInteger tf_1 = _upperEndpoint_13.multiply(_lowerEndpoint_14);
            BigInteger _upperEndpoint_14 = lrVal.upperEndpoint();
            BigInteger _upperEndpoint_15 = rrVal.upperEndpoint();
            final BigInteger tt_1 = _upperEndpoint_14.multiply(_upperEndpoint_15);
            BigInteger _min_3 = ff_1.min(ft_1);
            BigInteger _min_4 = _min_3.min(tf_1);
            BigInteger _min_5 = _min_4.min(tt_1);
            BigInteger _max_3 = ff_1.max(ft_1);
            BigInteger _max_4 = _max_3.max(tf_1);
            BigInteger _max_5 = _max_4.max(tt_1);
            Range<BigInteger> _createRange_3 = RangeTool.<BigInteger>createRange(_min_5, _max_5);
            return Optional.<Range<BigInteger>>of(_createRange_3);
        case MOD:
            final BigInteger rle = rrVal.lowerEndpoint();
            final BigInteger leftBound = rle.min(BigInteger.ZERO);
            BigInteger _upperEndpoint_16 = rrVal.upperEndpoint();
            final BigInteger rue = _upperEndpoint_16.subtract(BigInteger.ONE);
            final BigInteger rightBound = rue.max(BigInteger.ZERO);
            Range<BigInteger> _createRange_4 = RangeTool.<BigInteger>createRange(leftBound, rightBound);
            return Optional.<Range<BigInteger>>of(_createRange_4);
        case POW:
            BigInteger _lowerEndpoint_15 = lrVal.lowerEndpoint();
            BigInteger _lowerEndpoint_16 = rrVal.lowerEndpoint();
            int _intValue = _lowerEndpoint_16.intValue();
            final BigInteger ff_2 = _lowerEndpoint_15.pow(_intValue);
            BigInteger _lowerEndpoint_17 = lrVal.lowerEndpoint();
            BigInteger _upperEndpoint_17 = rrVal.upperEndpoint();
            int _intValue_1 = _upperEndpoint_17.intValue();
            final BigInteger ft_2 = _lowerEndpoint_17.pow(_intValue_1);
            BigInteger _upperEndpoint_18 = lrVal.upperEndpoint();
            BigInteger _lowerEndpoint_18 = rrVal.lowerEndpoint();
            int _intValue_2 = _lowerEndpoint_18.intValue();
            final BigInteger tf_2 = _upperEndpoint_18.pow(_intValue_2);
            BigInteger _upperEndpoint_19 = lrVal.upperEndpoint();
            BigInteger _upperEndpoint_20 = rrVal.upperEndpoint();
            int _intValue_3 = _upperEndpoint_20.intValue();
            final BigInteger tt_2 = _upperEndpoint_19.pow(_intValue_3);
            BigInteger _min_6 = ff_2.min(ft_2);
            BigInteger _min_7 = _min_6.min(tf_2);
            BigInteger _min_8 = _min_7.min(tt_2);
            BigInteger _max_6 = ff_2.max(ft_2);
            BigInteger _max_7 = _max_6.max(tf_2);
            BigInteger _max_8 = _max_7.max(tt_2);
            Range<BigInteger> _createRange_5 = RangeTool.<BigInteger>createRange(_min_8, _max_8);
            return Optional.<Range<BigInteger>>of(_createRange_5);
        default:
            break;
        }
    }
    throw new RuntimeException("Incorrectly implemented obj op");
}
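
Stripped of the Xtend-generated temporaries, the PLUS case above simply adds the two ranges endpoint by endpoint. A hand-written equivalent (illustrative only; RangeTool.createRange is replaced here by Range.closed):

import com.google.common.collect.Range;
import java.math.BigInteger;

public class IntervalPlusSketch {
    // [a..b] + [c..d] = [a + c .. b + d]
    static Range<BigInteger> add(Range<BigInteger> left, Range<BigInteger> right) {
        return Range.closed(
                left.lowerEndpoint().add(right.lowerEndpoint()),
                left.upperEndpoint().add(right.upperEndpoint()));
    }

    public static void main(String[] args) {
        Range<BigInteger> l = Range.closed(BigInteger.valueOf(-2), BigInteger.valueOf(3));
        Range<BigInteger> r = Range.closed(BigInteger.valueOf(10), BigInteger.valueOf(20));
        System.out.println(add(l, r)); // [8..23]
    }
}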

From source file:org.apache.bookkeeper.mledger.impl.ManagedCursorImpl.java

long getNumIndividualDeletedEntriesToSkip(long numEntries) {
    long totalEntriesToSkip = 0;
    long deletedMessages = 0;
    lock.readLock().lock();
    try {
        PositionImpl startPosition = markDeletePosition;
        PositionImpl endPosition = null;
        for (Range<PositionImpl> r : individualDeletedMessages.asRanges()) {
            endPosition = r.lowerEndpoint();
            if (startPosition.compareTo(endPosition) <= 0) {
                Range<PositionImpl> range = Range.openClosed(startPosition, endPosition);
                long entries = ledger.getNumberOfEntries(range);
                if (totalEntriesToSkip + entries >= numEntries) {
                    break;
                }
                totalEntriesToSkip += entries;
                deletedMessages += ledger.getNumberOfEntries(r);
                startPosition = r.upperEndpoint();
            } else {
                if (log.isDebugEnabled()) {
                    log.debug("[{}] deletePosition {} moved ahead without clearing deleteMsgs {} for cursor {}",
                            ledger.getName(), markDeletePosition, r.lowerEndpoint(), name);
                }
            }
        }
    } finally {
        lock.readLock().unlock();
    }
    return deletedMessages;
}

From source file:org.apache.bookkeeper.mledger.impl.ManagedCursorImpl.java

@Override
public void asyncDelete(Position pos, final AsyncCallbacks.DeleteCallback callback, Object ctx) {
    checkArgument(pos instanceof PositionImpl);

    if (STATE_UPDATER.get(this) == State.Closed) {
        callback.deleteFailed(new ManagedLedgerException("Cursor was already closed"), ctx);
        return;
    }

    PositionImpl position = (PositionImpl) pos;

    PositionImpl previousPosition = ledger.getPreviousPosition(position);
    PositionImpl newMarkDeletePosition = null;

    lock.writeLock().lock();

    try {
        if (log.isDebugEnabled()) {
            log.debug(
                    "[{}] [{}] Deleting single message at {}. Current status: {} - md-position: {}  - previous-position: {}",
                    ledger.getName(), name, pos, individualDeletedMessages, markDeletePosition,
                    previousPosition);
        }

        if (individualDeletedMessages.contains(position) || position.compareTo(markDeletePosition) <= 0) {
            if (log.isDebugEnabled()) {
                log.debug("[{}] [{}] Position was already deleted {}", ledger.getName(), name, position);
            }
            callback.deleteComplete(ctx);
            return;
        }

        if (previousPosition.compareTo(markDeletePosition) == 0 && individualDeletedMessages.isEmpty()) {
            if (log.isDebugEnabled()) {
                log.debug("[{}][{}] Immediately mark-delete to position {}", ledger.getName(), name, position);
            }

            newMarkDeletePosition = position;
        } else {
            // Add a range (prev, pos] to the set. Adding the previous entry as an open limit to the range will make
            // the RangeSet recognize the "continuity" between adjacent Positions
            individualDeletedMessages.add(Range.openClosed(previousPosition, position));
            ++messagesConsumedCounter;

            if (log.isDebugEnabled()) {
                log.debug("[{}] [{}] Individually deleted messages: {}", ledger.getName(), name,
                        individualDeletedMessages);
            }

            // If the lower bound of the range set is the current mark-delete position, then we can trigger a new
            // mark-delete to the upper bound of the first range segment
            Range<PositionImpl> range = individualDeletedMessages.asRanges().iterator().next();

            // Bug:7062188 - markDeletePosition can sometimes be stuck at the beginning of an empty ledger.
            // If the lowerBound is ahead of MarkDelete, verify if there are any entries in-between
            if (range.lowerEndpoint().compareTo(markDeletePosition) <= 0 || ledger
                    .getNumberOfEntries(Range.openClosed(markDeletePosition, range.lowerEndpoint())) <= 0) {

                if (log.isDebugEnabled()) {
                    log.debug("[{}] Found a position range to mark delete for cursor {}: {} ", ledger.getName(),
                            name, range);
                }

                newMarkDeletePosition = range.upperEndpoint();
            }
        }

        if (newMarkDeletePosition != null) {
            newMarkDeletePosition = setAcknowledgedPosition(newMarkDeletePosition);
        } else {
            newMarkDeletePosition = markDeletePosition;
        }
    } catch (Exception e) {
        log.warn("[{}] [{}] Error while updating individualDeletedMessages [{}]", ledger.getName(), name,
                e.getMessage(), e);
        callback.deleteFailed(new ManagedLedgerException(e), ctx);
        return;
    } finally {
        lock.writeLock().unlock();
    }

    // Apply rate limiting to mark-delete operations
    if (markDeleteLimiter != null && !markDeleteLimiter.tryAcquire()) {
        callback.deleteComplete(ctx);
        return;
    }

    try {
        internalAsyncMarkDelete(newMarkDeletePosition, new MarkDeleteCallback() {
            @Override
            public void markDeleteComplete(Object ctx) {
                callback.deleteComplete(ctx);
            }

            @Override
            public void markDeleteFailed(ManagedLedgerException exception, Object ctx) {
                callback.deleteFailed(exception, ctx);
            }

        }, ctx);

    } catch (Exception e) {
        log.warn("[{}] [{}] Error doing asyncDelete [{}]", ledger.getName(), name, e.getMessage(), e);
        if (log.isDebugEnabled()) {
            log.debug("[{}] Consumer {} cursor asyncDelete error, counters: consumed {} mdPos {} rdPos {}",
                    ledger.getName(), name, messagesConsumedCounter, markDeletePosition, readPosition);
        }
        callback.deleteFailed(new ManagedLedgerException(e), ctx);
    }
}

From source file:org.pshdl.model.extensions.RangeExtension.java

protected Optional<Range<BigInteger>> _determineRange(final HDLBitOp obj, final HDLEvaluationContext context) {
    HDLExpression _left = obj.getLeft();
    final Optional<Range<BigInteger>> leftRange = this.determineRange(_left, context);
    boolean _isPresent = leftRange.isPresent();
    boolean _not = (!_isPresent);
    if (_not) {
        return Optional.<Range<BigInteger>>absent();
    }
    final Range<BigInteger> lrVal = leftRange.get();
    boolean _or = false;
    boolean _hasLowerBound = lrVal.hasLowerBound();
    boolean _not_1 = (!_hasLowerBound);
    if (_not_1) {
        _or = true;
    } else {
        boolean _hasUpperBound = lrVal.hasUpperBound();
        boolean _not_2 = (!_hasUpperBound);
        _or = _not_2;
    }
    if (_or) {
        return Optional.<Range<BigInteger>>absent();
    }
    HDLExpression _right = obj.getRight();
    final Optional<Range<BigInteger>> rightRange = this.determineRange(_right, context);
    boolean _isPresent_1 = rightRange.isPresent();
    boolean _not_3 = (!_isPresent_1);
    if (_not_3) {
        return Optional.<Range<BigInteger>>absent();
    }
    final Range<BigInteger> rrVal = rightRange.get();
    boolean _or_1 = false;
    boolean _hasLowerBound_1 = rrVal.hasLowerBound();
    boolean _not_4 = (!_hasLowerBound_1);
    if (_not_4) {
        _or_1 = true;
    } else {
        boolean _hasUpperBound_1 = rrVal.hasUpperBound();
        boolean _not_5 = (!_hasUpperBound_1);
        _or_1 = _not_5;
    }
    if (_or_1) {
        return Optional.<Range<BigInteger>>absent();
    }
    HDLBitOp.HDLBitOpType _type = obj.getType();
    final HDLBitOp.HDLBitOpType type = _type;
    boolean _matched = false;
    if (!_matched) {
        boolean _or_2 = false;
        boolean _equals = Objects.equal(type, HDLBitOp.HDLBitOpType.OR);
        if (_equals) {
            _or_2 = true;
        } else {
            boolean _equals_1 = Objects.equal(type, HDLBitOp.HDLBitOpType.XOR);
            _or_2 = _equals_1;
        }
        if (_or_2) {
            _matched = true;
            obj.<IHDLObject>addMeta(ProblemDescription.SOURCE, obj);
            obj.<ProblemDescription>addMeta(ProblemDescription.DESCRIPTION,
                    ProblemDescription.BIT_NOT_SUPPORTED_FOR_RANGES);
            BigInteger _upperEndpoint = lrVal.upperEndpoint();
            int _bitLength = _upperEndpoint.bitLength();
            BigInteger _shiftLeft = BigInteger.ONE.shiftLeft(_bitLength);
            BigInteger _subtract = _shiftLeft.subtract(BigInteger.ONE);
            Range<BigInteger> _createRange = RangeTool.<BigInteger>createRange(BigInteger.ZERO, _subtract);
            return Optional.<Range<BigInteger>>of(_createRange);
        }
    }
    if (!_matched) {
        if (Objects.equal(type, HDLBitOp.HDLBitOpType.AND)) {
            _matched = true;
            obj.<IHDLObject>addMeta(ProblemDescription.SOURCE, obj);
            obj.<ProblemDescription>addMeta(ProblemDescription.DESCRIPTION,
                    ProblemDescription.BIT_NOT_SUPPORTED_FOR_RANGES);
            BigInteger _upperEndpoint_1 = lrVal.upperEndpoint();
            BigInteger _upperEndpoint_2 = rrVal.upperEndpoint();
            int _bitLength_1 = _upperEndpoint_2.bitLength();
            BigInteger _shiftLeft_1 = BigInteger.ONE.shiftLeft(_bitLength_1);
            BigInteger _subtract_1 = _shiftLeft_1.subtract(BigInteger.ONE);
            BigInteger _min = _upperEndpoint_1.min(_subtract_1);
            Range<BigInteger> _createRange_1 = RangeTool.<BigInteger>createRange(BigInteger.ZERO, _min);
            return Optional.<Range<BigInteger>>of(_createRange_1);
        }
    }
    if (!_matched) {
        boolean _or_3 = false;
        boolean _equals_2 = Objects.equal(type, HDLBitOp.HDLBitOpType.LOGI_AND);
        if (_equals_2) {
            _or_3 = true;
        } else {
            boolean _equals_3 = Objects.equal(type, HDLBitOp.HDLBitOpType.LOGI_OR);
            _or_3 = _equals_3;
        }
        if (_or_3) {
            _matched = true;
            obj.<IHDLObject>addMeta(ProblemDescription.SOURCE, obj);
            obj.<ProblemDescription>addMeta(ProblemDescription.DESCRIPTION,
                    ProblemDescription.BOOLEAN_NOT_SUPPORTED_FOR_RANGES);
            Range<BigInteger> _createRange_2 = RangeTool.<BigInteger>createRange(BigInteger.ZERO,
                    BigInteger.ONE);
            return Optional.<Range<BigInteger>>of(_createRange_2);
        }
    }
    throw new RuntimeException("Incorrectly implemented obj op");
}
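
In the OR/XOR branch above, the result range is widened to the full bit-width implied by the left operand's upper endpoint. Reduced to its core (illustrative, assuming a bounded range):

import com.google.common.collect.Range;
import java.math.BigInteger;

public class BitOpBoundSketch {
    public static void main(String[] args) {
        Range<BigInteger> lrVal = Range.closed(BigInteger.ZERO, BigInteger.valueOf(200));

        // Largest value representable in as many bits as the upper endpoint needs: 2^bitLength - 1
        int bits = lrVal.upperEndpoint().bitLength();                             // 200 needs 8 bits
        BigInteger max = BigInteger.ONE.shiftLeft(bits).subtract(BigInteger.ONE); // 255

        System.out.println(Range.closed(BigInteger.ZERO, max)); // [0..255]
    }
}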

From source file:io.github.msdk.features.ransacaligner.RansacAlignerMethod.java

private Hashtable<FeatureTableRow, FeatureTableRow> getAlignmentMap(FeatureTable featureTable) {

    // Create a table of mappings for best scores
    Hashtable<FeatureTableRow, FeatureTableRow> alignmentMapping = new Hashtable<FeatureTableRow, FeatureTableRow>();

    // Create a sorted set of scores matching
    TreeSet<RowVsRowScore> scoreSet = new TreeSet<RowVsRowScore>();

    // RANSAC algorithm
    List<AlignStructMol> list = ransacPeakLists(result, featureTable);
    PolynomialFunction function = this.getPolynomialFunction(list);

    List<FeatureTableRow> allRows = featureTable.getRows();

    for (FeatureTableRow row : allRows) {
        // Calculate limits for a row with which the row can be aligned
        Range<Double> mzRange = mzTolerance.getToleranceRange(row.getMz());

        double rt;
        try {
            rt = function.value(row.getChromatographyInfo().getRetentionTime());
        } catch (NullPointerException e) {
            rt = row.getChromatographyInfo().getRetentionTime();
        }
        if (Double.isNaN(rt) || rt == -1) {
            rt = row.getChromatographyInfo().getRetentionTime();
        }

        Range<Double> rtRange = rtToleranceAfterCorrection.getToleranceRange(rt);

        // Get all rows of the aligned feature table within the m/z and
        // RT limits
        List<FeatureTableRow> candidateRows = result.getRowsInsideRange(rtRange, mzRange);

        for (FeatureTableRow candidateRow : candidateRows) {
            RowVsRowScore score;
            if (requireSameCharge) {
                FeatureTableColumn<Integer> chargeColumn1 = featureTable.getColumn(ColumnName.CHARGE, null);
                FeatureTableColumn<Integer> chargeColumn2 = result.getColumn(ColumnName.CHARGE, null);
                Integer charge1 = row.getData(chargeColumn1);
                Integer charge2 = candidateRow.getData(chargeColumn2);
                if (!charge1.equals(charge2))
                    continue;
            }

            // Check ion annotation
            if (requireSameAnnotation) {
                FeatureTableColumn<List<IonAnnotation>> ionAnnotationColumn1 = featureTable
                        .getColumn(ColumnName.IONANNOTATION, null);
                FeatureTableColumn<List<IonAnnotation>> ionAnnotationColumn2 = result
                        .getColumn(ColumnName.IONANNOTATION, null);
                List<IonAnnotation> ionAnnotations1 = row.getData(ionAnnotationColumn1);
                List<IonAnnotation> ionAnnotations2 = candidateRow.getData(ionAnnotationColumn2);

                // Check that all ion annotations in first row are in
                // the candidate row
                boolean equalIons = false;
                if (ionAnnotations1 != null && ionAnnotations2 != null) {
                    for (IonAnnotation ionAnnotation : ionAnnotations1) {
                        for (IonAnnotation targetIonAnnotation : ionAnnotations2) {
                            if (targetIonAnnotation.compareTo(ionAnnotation) == 0)
                                equalIons = true;
                        }
                    }
                }
                if (!equalIons)
                    continue;

            }

            try {
                double mzLength = mzRange.upperEndpoint() - mzRange.lowerEndpoint();
                double rtLength = rtRange.upperEndpoint() - rtRange.lowerEndpoint();
                score = new RowVsRowScore(row, candidateRow, mzLength, rtLength, new Float(rt));

                scoreSet.add(score);

            } catch (Exception e) {
                return null;
            }
        }
    }

    // Iterate scores by descending order
    Iterator<RowVsRowScore> scoreIterator = scoreSet.iterator();
    while (scoreIterator.hasNext()) {

        RowVsRowScore score = scoreIterator.next();

        // Check if the row is already mapped
        if (alignmentMapping.containsKey(score.getFeatureTableRow())) {
            continue;
        }

        // Check if the aligned row is already filled
        if (alignmentMapping.containsValue(score.getAlignedRow())) {
            continue;
        }

        alignmentMapping.put(score.getFeatureTableRow(), score.getAlignedRow());

    }

    return alignmentMapping;
}
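
The mzLength and rtLength above are just the widths of the tolerance windows, computed directly from the range endpoints. A trivial illustration with made-up values:

import com.google.common.collect.Range;

public class ToleranceWidthSketch {
    public static void main(String[] args) {
        Range<Double> mzRange = Range.closed(499.95, 500.05);
        Range<Double> rtRange = Range.closed(120.0, 130.0);

        System.out.println(mzRange.upperEndpoint() - mzRange.lowerEndpoint()); // ~0.1
        System.out.println(rtRange.upperEndpoint() - rtRange.lowerEndpoint()); // 10.0
    }
}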

From source file:io.github.msdk.features.joinaligner.JoinAlignerMethod.java

/** {@inheritDoc} */
@Override
public FeatureTable execute() throws MSDKException {

    // Calculate the number of features to process. Each feature will be
    // processed twice: first for score calculation and then for actual
    // alignment.
    for (FeatureTable featureTable : featureTables) {
        totalFeatures += featureTable.getRows().size() * 2;
    }

    // Iterate through all feature tables
    Boolean firstFeatureTable = true;
    for (FeatureTable featureTable : featureTables) {

        // Add columns from the original feature table to the result table
        for (FeatureTableColumn<?> column : featureTable.getColumns()) {
            if (firstFeatureTable)
                result.addColumn(column);
            else if (column.getSample() != null)
                result.addColumn(column);
        }
        firstFeatureTable = false;

        // Create a sorted array of matching scores between two rows
        List<RowVsRowScore> scoreSet = new ArrayList<RowVsRowScore>();

        // Calculate scores for all possible alignments of this row
        for (FeatureTableRow row : featureTable.getRows()) {

            final Double mz = row.getMz();
            if (mz == null)
                continue;

            // Calculate the m/z range limit for the current row
            Range<Double> mzRange = mzTolerance.getToleranceRange(mz);

            // Continue if no chromatography info is available
            ChromatographyInfo chromatographyInfo = row.getChromatographyInfo();
            if (chromatographyInfo == null)
                continue;

            // Calculate the RT range limit for the current row
            Range<Double> rtRange = rtTolerance.getToleranceRange(chromatographyInfo.getRetentionTime());

            // Get all rows of the aligned feature table within the m/z and
            // RT limits
            List<FeatureTableRow> candidateRows = result.getRowsInsideRange(rtRange, mzRange);

            // Calculate scores and store them
            for (FeatureTableRow candidateRow : candidateRows) {

                // Check charge
                if (requireSameCharge) {
                    FeatureTableColumn<Integer> chargeColumn1 = featureTable.getColumn(ColumnName.CHARGE, null);
                    FeatureTableColumn<Integer> chargeColumn2 = result.getColumn(ColumnName.CHARGE, null);
                    Integer charge1 = row.getData(chargeColumn1);
                    Integer charge2 = candidateRow.getData(chargeColumn2);
                    if (!charge1.equals(charge2))
                        continue;
                }

                // Check ion annotation
                if (requireSameAnnotation) {
                    FeatureTableColumn<List<IonAnnotation>> ionAnnotationColumn1 = featureTable
                            .getColumn(ColumnName.IONANNOTATION, null);
                    FeatureTableColumn<List<IonAnnotation>> ionAnnotationColumn2 = result
                            .getColumn(ColumnName.IONANNOTATION, null);
                    List<IonAnnotation> ionAnnotations1 = row.getData(ionAnnotationColumn1);
                    List<IonAnnotation> ionAnnotations2 = candidateRow.getData(ionAnnotationColumn2);

                    // Check that all ion annotations in first row are in
                    // the candidate row
                    boolean equalIons = false;
                    if (ionAnnotations1 != null && ionAnnotations2 != null) {
                        for (IonAnnotation ionAnnotation : ionAnnotations1) {
                            for (IonAnnotation targetIonAnnotation : ionAnnotations2) {
                                if (targetIonAnnotation.compareTo(ionAnnotation) == 0)
                                    equalIons = true;
                            }
                        }
                    }
                    if (!equalIons)
                        continue;

                }

                // Calculate score
                double mzLength = mzRange.upperEndpoint() - mzRange.lowerEndpoint();
                double rtLength = rtRange.upperEndpoint() - rtRange.lowerEndpoint();
                RowVsRowScore score = new RowVsRowScore(row, candidateRow, mzLength / 2.0, mzWeight,
                        rtLength / 2.0, rtWeight);

                // Add the score to the array
                scoreSet.add(score);

            }

            processedFeatures++;

            if (canceled)
                return null;
        }

        // Create a table of mappings for best scores
        Hashtable<FeatureTableRow, FeatureTableRow> alignmentMapping = new Hashtable<FeatureTableRow, FeatureTableRow>();

        // Iterate scores by descending order
        Iterator<RowVsRowScore> scoreIterator = scoreSet.iterator();
        while (scoreIterator.hasNext()) {
            RowVsRowScore score = scoreIterator.next();

            // Check if the row is already mapped
            if (alignmentMapping.containsKey(score.getFeatureTableRow()))
                continue;

            // Check if the aligned row is already filled
            if (alignmentMapping.containsValue(score.getAlignedRow()))
                continue;

            alignmentMapping.put(score.getFeatureTableRow(), score.getAlignedRow());
        }

        // Align all rows using the mapping
        for (FeatureTableRow sourceRow : featureTable.getRows()) {
            FeatureTableRow targetRow = alignmentMapping.get(sourceRow);

            // If we have no mapping for this row, add a new one
            if (targetRow == null) {
                targetRow = MSDKObjectBuilder.getFeatureTableRow(result, newRowID);
                result.addRow(targetRow);
                FeatureTableColumn<Integer> column = result.getColumn(ColumnName.ID, null);
                targetRow.setData(column, newRowID);
                newRowID++;
            }

            // Add all features from the original row to the aligned row
            for (Sample sample : sourceRow.getFeatureTable().getSamples()) {
                FeatureTableUtil.copyFeatureValues(sourceRow, targetRow, sample);
            }

            // Combine common values from the original row with the aligned
            // row
            FeatureTableUtil.copyCommonValues(sourceRow, targetRow, true);

            processedFeatures++;
        }

        // Re-calculate the row averages
        FeatureTableUtil.recalculateAverages(result);

        if (canceled)
            return null;

    }

    // Return the new feature table
    return result;
}