Example usage for com.google.common.collect Range contains

Introduction

This page lists example usages of com.google.common.collect.Range#contains, collected from open-source projects.

Prototype

public boolean contains(C value) 

Document

Returns true if value is within the bounds of this range.
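
Before the project examples below, a minimal self-contained sketch (assuming only Guava on the classpath) shows how contains behaves for the common factory methods:

import com.google.common.collect.Range;

public class RangeContainsDemo {
    public static void main(String[] args) {
        // closed(2, 5) includes both endpoints
        Range<Integer> closed = Range.closed(2, 5);
        System.out.println(closed.contains(2));   // true
        System.out.println(closed.contains(5));   // true
        System.out.println(closed.contains(6));   // false

        // closedOpen(2, 5) excludes the upper endpoint
        Range<Integer> closedOpen = Range.closedOpen(2, 5);
        System.out.println(closedOpen.contains(5));   // false

        // Unbounded ranges work the same way
        Range<Double> atLeast = Range.atLeast(1.5);
        System.out.println(atLeast.contains(100.0));  // true
    }
}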

Usage

From source file:org.robotframework.ide.eclipse.main.plugin.project.build.validation.KeywordCallArgumentsValidator.java

private boolean validateNumberOfArguments() {
    final Range<Integer> expectedArgsNumber = descriptor.getPossibleNumberOfArguments();
    final int actual = arguments.size();
    if (!expectedArgsNumber.contains(actual)) {
        if (!listIsPassed() && !dictIsPassed()) {
            final String additional = String.format("Keyword '%s' expects " + getRangesInfo(expectedArgsNumber)
                    + ", but %d " + toBeInProperForm(actual) + " provided", definingToken.getText(), actual);

            final RobotProblem problem = RobotProblem.causedBy(ArgumentProblem.INVALID_NUMBER_OF_PARAMETERS)
                    .formatMessageWith(additional);
            reporter.handleProblem(problem, file, definingToken);
            return false;
        }
    }
    return true;
}
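
As a rough illustration of the pattern above (the range is built by hand here; getPossibleNumberOfArguments is the project's own API), an expected argument count of "two required plus one optional" can be modeled as a closed range and checked the same way:

import com.google.common.collect.Range;

public class ArgumentCountCheckDemo {
    public static void main(String[] args) {
        // Hypothetical keyword taking 2 required and 1 optional argument
        Range<Integer> expectedArgsNumber = Range.closed(2, 3);

        int actual = 4;
        if (!expectedArgsNumber.contains(actual)) {
            System.out.printf("Keyword expects 2 to 3 arguments, but %d were provided%n", actual);
        }
    }
}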

From source file:org.tensorics.core.tensor.lang.OngoingTensorFiltering.java

public <C extends Comparable<C>> Tensor<E> by(Class<C> coordinateClass, Range<C> coordinateRange) {
    checkNotNull(coordinateClass, "coordinateClass must not be null");
    checkNotNull(coordinateRange, "coordinateRange must not be null");

    ImmutableTensor.Builder<E> builder = ImmutableTensor.builder(tensor.shape().dimensionSet());
    builder.context(tensor.context());
    for (Entry<Position, E> entry : TensorInternals.mapFrom(tensor).entrySet()) {
        if (coordinateRange.contains(entry.getKey().coordinateFor(coordinateClass))) {
            builder.put(entry.getKey(), entry.getValue());
        }
    }
    return builder.build();
}
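
The same filtering idea, stripped of the Tensorics types and applied to a plain Map (the names here are assumptions, not the Tensorics API):

import com.google.common.collect.Range;

import java.util.LinkedHashMap;
import java.util.Map;

public class RangeKeyFilterDemo {
    // Keep only the entries whose key falls inside the given range
    static <K extends Comparable<K>, V> Map<K, V> filterByKey(Map<K, V> data, Range<K> keyRange) {
        Map<K, V> result = new LinkedHashMap<>();
        for (Map.Entry<K, V> entry : data.entrySet()) {
            if (keyRange.contains(entry.getKey())) {
                result.put(entry.getKey(), entry.getValue());
            }
        }
        return result;
    }

    public static void main(String[] args) {
        Map<Integer, String> data = new LinkedHashMap<>();
        data.put(1, "a");
        data.put(5, "b");
        data.put(9, "c");
        System.out.println(filterByKey(data, Range.closed(2, 9)));   // {5=b, 9=c}
    }
}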

From source file:com.pingcap.tikv.Snapshot.java

public List<KvPair> batchGet(List<ByteString> keys) {
    Region curRegion = null;
    Range<ByteBuffer> curKeyRange = null;
    Pair<Region, Store> lastPair = null;
    List<ByteString> keyBuffer = new ArrayList<>();
    List<KvPair> result = new ArrayList<>(keys.size());
    for (ByteString key : keys) {
        if (curRegion == null || !curKeyRange.contains(key.asReadOnlyByteBuffer())) {
            // The key belongs to a different region: flush the keys buffered for the previous region first
            if (lastPair != null && !keyBuffer.isEmpty()) {
                try (RegionStoreClient client = RegionStoreClient.create(lastPair.first, lastPair.second,
                        getSession())) {
                    List<KvPair> partialResult = client.batchGet(keyBuffer, version.getVersion());
                    for (KvPair kv : partialResult) {
                        // TODO: Add lock check
                        result.add(kv);
                    }
                } catch (Exception e) {
                    throw new TiClientInternalException("Error Closing Store client.", e);
                }
                keyBuffer = new ArrayList<>();
            }
            Pair<Region, Store> pair = regionCache.getRegionStorePairByKey(key);
            lastPair = pair;
            curRegion = pair.first;
            curKeyRange = Range.closedOpen(curRegion.getStartKey().asReadOnlyByteBuffer(),
                    curRegion.getEndKey().asReadOnlyByteBuffer());
        }
        keyBuffer.add(key);
    }
    // Flush whatever is still buffered for the last region
    if (lastPair != null && !keyBuffer.isEmpty()) {
        try (RegionStoreClient client = RegionStoreClient.create(lastPair.first, lastPair.second,
                getSession())) {
            List<KvPair> partialResult = client.batchGet(keyBuffer, version.getVersion());
            for (KvPair kv : partialResult) {
                // TODO: Add lock check
                result.add(kv);
            }
        } catch (Exception e) {
            throw new TiClientInternalException("Error Closing Store client.", e);
        }
    }
    return result;
}
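
The method above groups keys by the region whose half-open key range contains them. A simplified, self-contained version of that grouping over plain strings (the region lookup and the RPC are omitted; the ranges are invented):

import com.google.common.collect.Range;

import java.util.ArrayList;
import java.util.List;

public class KeyRangeGroupingDemo {
    public static void main(String[] args) {
        // Two pretend "regions", each owning a half-open key range: [a, f) and [f, z)
        List<Range<String>> regionRanges = List.of(Range.closedOpen("a", "f"), Range.closedOpen("f", "z"));
        List<String> keys = List.of("apple", "fig", "grape", "banana");

        for (Range<String> region : regionRanges) {
            List<String> buffer = new ArrayList<>();
            for (String key : keys) {
                // closedOpen: the start key is inclusive, the end key is exclusive
                if (region.contains(key)) {
                    buffer.add(key);
                }
            }
            System.out.println(region + " -> " + buffer);
        }
    }
}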

From source file:net.sf.mzmine.datamodel.impl.SimplePeakList.java

public PeakListRow[] getRowsInsideScanAndMZRange(Range<Double> rtRange, Range<Double> mzRange) {
    Vector<PeakListRow> rowsInside = new Vector<PeakListRow>();

    for (PeakListRow row : peakListRows) {
        if (rtRange.contains(row.getAverageRT()) && mzRange.contains(row.getAverageMZ()))
            rowsInside.add(row);
    }

    return rowsInside.toArray(new PeakListRow[0]);
}

From source file:net.sf.mzmine.modules.visualization.ida.IDADataSet.java

public Number getMaxZ(Range<Double> mzRange) {
    double max = 1.0;
    for (int row = 0; row < totalmsmsScans; row++) {
        if (mzRange.contains(mzValues[row])) {
            if (max < intensityValues[row]) {
                max = intensityValues[row];
            }
        }
    }
    return max;
}

From source file:net.sf.mzmine.datamodel.impl.SimplePeakList.java

/**
 * @see net.sf.mzmine.datamodel.PeakList#getPeaksInsideScanAndMZRange(double,
 *      double, double, double)
 */
public Feature[] getPeaksInsideScanAndMZRange(RawDataFile file, Range<Double> rtRange, Range<Double> mzRange) {
    Vector<Feature> peaksInside = new Vector<Feature>();

    Feature[] peaks = getPeaks(file);
    for (Feature p : peaks) {
        if (rtRange.contains(p.getRT()) && mzRange.contains(p.getMZ()))
            peaksInside.add(p);
    }

    return peaksInside.toArray(new Feature[0]);
}
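
The three mzmine snippets above share one pattern: an item passes only if both of its coordinates fall inside the requested windows. Reduced to its essentials (the Row record is a made-up stand-in, not the mzmine API):

import com.google.common.collect.Range;

import java.util.List;
import java.util.stream.Collectors;

public class TwoDimensionalWindowDemo {
    // Minimal stand-in for a peak-list row with a retention time and an m/z value
    record Row(double rt, double mz) {}

    public static void main(String[] args) {
        List<Row> rows = List.of(new Row(1.2, 300.1), new Row(4.8, 512.4), new Row(2.5, 305.0));

        Range<Double> rtRange = Range.closed(1.0, 3.0);
        Range<Double> mzRange = Range.closed(299.0, 310.0);

        List<Row> inside = rows.stream()
                .filter(r -> rtRange.contains(r.rt()) && mzRange.contains(r.mz()))
                .collect(Collectors.toList());

        System.out.println(inside);   // the rows at RT 1.2 and 2.5
    }
}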

From source file:com.zulily.omicron.crontab.CrontabExpression.java

/**
 * Does the actual work of tearing apart the schedule expression and turning it
 * into a numerical set of runtime whitelists
 *
 * @param expressionPart The current part we're working on
 * @param expression     The text expression to evaluate
 * @return A set within the expression's possible execution range
 */
private static ImmutableSortedSet<Integer> evaluateExpressionPart(final ExpressionPart expressionPart,
        final String expression) {
    // Order of operations ->
    // 1) Split value by commas (lists) and for each csv.n:
    // 2) Split value by slashes (range/rangeStep)
    // 3) Match all for '*' or split hyphenated range for rangeStart and rangeEnd
    //
    // Converts sun==7 -> sun==0 to make schedule interpretation logic easier in timeInSchedule() evaluation
    // NOTE: this breaks week spanning ranges such as fri-tue, which instead must
    //       be handled as a list of ranges fri-sat,sun-tue

    final List<String> csvParts = Utils.COMMA_SPLITTER.splitToList(expression);

    final TreeSet<Integer> results = Sets.newTreeSet();

    for (final String csvPart : csvParts) {

        final List<String> slashParts = Utils.FORWARD_SLASH_SPLITTER.splitToList(csvPart);

        // Range step of expression i.e. */2 (none is 1 obviously)
        int rangeStep = 1;

        checkArgument(!slashParts.isEmpty() && slashParts.size() <= 2, "Invalid cron expression for %s: %s",
                expressionPart.name(), expression);

        if (slashParts.size() == 2) {
            // Ordinal definition: 0 = rangeExpression, 1 = stepExpression
            final Integer rangeStepInteger = expressionPart.textUnitToInt(slashParts.get(1));

            checkNotNull(rangeStepInteger,
                    "Invalid cron expression for %s (rangeStep is not a positive int): %s",
                    expressionPart.name(), expression);

            checkArgument(rangeStepInteger > 0, "Invalid cron expression for %s (rangeStep is not valid): %s",
                    expressionPart.name(), expression);

            rangeStep = rangeStepInteger;
        }

        final String rangeExpression = slashParts.get(0);

        final Range<Integer> allowedRange = expressionPart.getAllowedRange();

        int rangeStart = allowedRange.lowerEndpoint();
        int rangeEnd = allowedRange.upperEndpoint();

        // either * or 0 or 0-6, etc
        if (!"*".equals(rangeExpression)) {

            final List<String> hyphenParts = Utils.HYPHEN_SPLITTER.splitToList(rangeExpression);

            checkArgument(!hyphenParts.isEmpty() && hyphenParts.size() <= 2,
                    "Invalid cron expression for %s: %s", expressionPart.name(), expression);

            Integer rangeStartInteger = expressionPart.textUnitToInt(hyphenParts.get(0));

            checkNotNull(rangeStartInteger, "Invalid cron expression for %s (rangeStart is not an int): %s",
                    expressionPart.name(), expression);

            //correct terrible "sunday can be either 0 or 7" bug/feature in crond
            if (expressionPart == ExpressionPart.DaysOfWeek && rangeStartInteger == 7) {
                rangeStartInteger = 0;
            }

            checkArgument(allowedRange.contains(rangeStartInteger),
                    "Invalid cron expression for %s (valid range is %s): %s", expressionPart.name(),
                    expressionPart.getAllowedRange(), expression);

            rangeStart = rangeStartInteger;

            if (hyphenParts.size() == 2) {

                Integer rangeEndInteger = expressionPart.textUnitToInt(hyphenParts.get(1));

                checkNotNull(rangeEndInteger, "Invalid cron expression for %s (rangeEnd is not an int): %s",
                        expressionPart.name(), expression);

                //correct terrible "sunday can be either 0 or 7" bug/feature in crond
                if (expressionPart == ExpressionPart.DaysOfWeek && rangeEndInteger == 7) {
                    rangeEndInteger = 0;
                }

                checkArgument(allowedRange.contains(rangeEndInteger),
                        "Invalid cron expression for %s (valid range is %s): %s", expressionPart.name(),
                        expressionPart.getAllowedRange(), expression);

                rangeEnd = rangeEndInteger;

            } else {
                // Single value specified
                rangeEnd = rangeStart;

            }

        }

        checkArgument(rangeStart <= rangeEnd,
                "Invalid cron expression for %s (range start must not be greater than range end): %s",
                expressionPart.name(), expression);

        for (int runTime = rangeStart; runTime <= rangeEnd; runTime += rangeStep) {
            results.add(runTime);
        }

    }

    return ImmutableSortedSet.copyOf(results);
}
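
The allowed-range checks above boil down to Range.closed over each field's legal values. A cut-down sketch of just that validation step (the bounds here are standard cron bounds, not necessarily Omicron's actual ExpressionPart values):

import static com.google.common.base.Preconditions.checkArgument;

import com.google.common.collect.Range;

public class CronFieldRangeDemo {
    public static void main(String[] args) {
        Range<Integer> minuteRange = Range.closed(0, 59);
        Range<Integer> dayOfWeekRange = Range.closed(0, 6);

        // Normalize "Sunday may be 0 or 7" before the range check, as the method above does
        int dayOfWeek = 7;
        if (dayOfWeek == 7) {
            dayOfWeek = 0;
        }

        checkArgument(minuteRange.contains(30), "minute is outside the valid range %s", minuteRange);
        checkArgument(dayOfWeekRange.contains(dayOfWeek), "day of week is outside the valid range %s", dayOfWeekRange);
        System.out.println("both values fall inside their allowed ranges");
    }
}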

From source file:eu.numberfour.n4js.ui.editor.EditorContentExtractor.java

/**
 * Optionally returns the semantic AST node element (given as the element URI) as a {@link StyledTextDescriptor
 * styled text descriptor}. If the element cannot be resolved or the styled text cannot be computed, this method
 * returns an {@link Optional#absent() absent} instance but never {@code null}.
 *
 * @param uri
 *            the URI of the semantic element in the AST.
 * @return a styled text descriptor representing the extracted code for the semantic AST node given with its unique
 *         URI.
 */
public Optional<StyledTextDescriptor> getDescriptorForSemanticElement(final URI uri) {
    if (null == uri) {
        return absent();
    }

    final URI trimmedUri = uri.hasFragment() ? uri.trimFragment() : uri;
    final IN4JSProject project = core.findProject(trimmedUri).orNull();
    if (project == null) {
        return absent();
    }
    final ResourceSet resSet = core.createResourceSet(Optional.of(project));
    final IResourceDescriptions index = core.getXtextIndex(resSet);
    final IResourceDescription resDesc = index.getResourceDescription(trimmedUri);
    final TModule module = core.loadModuleFromIndex(resSet, resDesc, false);
    if (null == module || null == module.eResource() || null == module.eResource().getResourceSet()) {
        return absent();
    }

    final URI moduleUri = module.eResource().getURI();
    final IFile file = getWorkspace().getRoot().getFile(new Path(moduleUri.toPlatformString(true)));
    if (null == file || !file.exists()) {
        return absent();
    }

    final FileEditorInput editorInput = new FileEditorInput(file);
    try {
        docProvider.connect(editorInput);
    } catch (final CoreException e) {
        LOGGER.error("Error while connecting editor input with document provider: " + e);
        return absent();
    }

    final IDocument doc = docProvider.getDocument(editorInput);
    if (null == doc) {
        return absent();
    }

    final XtextResource xtextResource = (XtextResource) module.eResource();
    final ResourceSet resourceSet = xtextResource.getResourceSet();
    final EObject object = resourceSet.getEObject(uri, true);
    if (null == object) {
        return absent();
    }

    final ITextRegion textRegion = locationInFileProvider.getFullTextRegion(object);
    if (null == textRegion) {
        return absent();
    }

    try {

        final int lineOfOffset = doc.getLineOfOffset(textRegion.getOffset());
        final int lineOffset = doc.getLineOffset(lineOfOffset);
        final int offset = lineOffset;
        final int length = textRegion.getLength() + (textRegion.getOffset() - lineOffset);
        final String text = doc.get(offset, length);

        final IPresentationRepairer repairer = repairerProvider.get();
        final IPresentationDamager damager = damagerProvider.get();
        for (final String contentType : partitionTypeMapper.getSupportedPartitionTypes()) {
            reconciler.setRepairer(repairer, contentType);
            repairer.setDocument(doc);
            reconciler.setDamager(damager, contentType);
            damager.setDocument(doc);
        }

        final Region region = new Region(offset, length);
        final TextPresentation textPresentation = reconciler.createRepairDescription(region, doc);

        final Iterator<?> rangeItr = textPresentation.getAllStyleRangeIterator();
        final Collection<StyleRange> ranges = newLinkedList();
        while (rangeItr.hasNext()) {
            final Object next = rangeItr.next();
            if (next instanceof StyleRange) {
                ranges.add((StyleRange) next);
            }
        }

        final Range<Integer> textRange = Range.closed(offset, offset + length);
        for (final Iterator<StyleRange> itr = ranges.iterator(); itr.hasNext(); /* nothing */) {
            final StyleRange range = itr.next();
            if (!textRange.contains(range.start) || !textRange.contains(range.start + range.length)) {
                itr.remove();
            } else {
                range.start = range.start - offset;
            }
        }

        return fromNullable(new StyledTextDescriptorImpl(text, ranges));

    } catch (final BadLocationException e) {
        LOGGER.error("Error while trying to extract text from document.", e);
        return absent();
    }

}
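
Isolating just the Range usage from the extractor above: a style range is kept only when both its start and end offsets lie inside the extracted text region, and is then rebased to the region start. A minimal stand-alone version (int pairs instead of SWT StyleRange objects):

import com.google.common.collect.Range;

import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;

public class OffsetRangeFilterDemo {
    public static void main(String[] args) {
        int offset = 100;
        int length = 50;
        Range<Integer> textRange = Range.closed(offset, offset + length);

        // Each int[] is {start, length}, standing in for an SWT StyleRange
        List<int[]> ranges = new ArrayList<>(List.of(
                new int[] { 110, 20 },    // fully inside the region
                new int[] { 140, 30 },    // runs past the region end
                new int[] { 90, 5 }));    // starts before the region

        for (Iterator<int[]> itr = ranges.iterator(); itr.hasNext(); /* nothing */) {
            int[] range = itr.next();
            if (!textRange.contains(range[0]) || !textRange.contains(range[0] + range[1])) {
                itr.remove();
            } else {
                range[0] = range[0] - offset;   // rebase to the start of the extracted text
            }
        }
        System.out.println(ranges.size());   // 1
    }
}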

From source file:net.sf.mzmine.modules.peaklistmethods.filtering.peakfilter.PeakFilterTask.java

/**
 * Filter the peak list.
 *
 * @param peakList
 *            peak list to filter.
 * @return a new peak list with entries of the original peak list that pass
 *         the filtering.
 */
private PeakList filterPeakList(final PeakList peakList) {

    // Make a copy of the peakList
    final PeakList newPeakList = new SimplePeakList(
            peakList.getName() + ' ' + parameters.getParameter(RowsFilterParameters.SUFFIX).getValue(),
            peakList.getRawDataFiles());

    // Get parameters - which filters are active
    final boolean filterByDuration = parameters.getParameter(PeakFilterParameters.PEAK_DURATION).getValue();
    final boolean filterByArea = parameters.getParameter(PeakFilterParameters.PEAK_AREA).getValue();
    final boolean filterByHeight = parameters.getParameter(PeakFilterParameters.PEAK_HEIGHT).getValue();
    final boolean filterByDatapoints = parameters.getParameter(PeakFilterParameters.PEAK_DATAPOINTS).getValue();
    final boolean filterByFWHM = parameters.getParameter(PeakFilterParameters.PEAK_FWHM).getValue();
    final boolean filterByTailingFactor = parameters.getParameter(PeakFilterParameters.PEAK_TAILINGFACTOR)
            .getValue();
    final boolean filterByAsymmetryFactor = parameters.getParameter(PeakFilterParameters.PEAK_ASYMMETRYFACTOR)
            .getValue();

    // Loop through all rows in peak list
    final PeakListRow[] rows = peakList.getRows();
    totalRows = rows.length;
    for (processedRows = 0; !isCanceled() && processedRows < totalRows; processedRows++) {
        final PeakListRow row = rows[processedRows];
        final RawDataFile[] rawdatafiles = row.getRawDataFiles();
        int totalRawDataFiles = rawdatafiles.length;
        boolean[] keepPeak = new boolean[totalRawDataFiles];

        for (int i = 0; i < totalRawDataFiles; i++) {
            // Peak values
            keepPeak[i] = true;
            final Feature peak = row.getPeak(rawdatafiles[i]);
            final double peakDuration = peak.getRawDataPointsRTRange().upperEndpoint()
                    - peak.getRawDataPointsRTRange().lowerEndpoint();
            final double peakArea = peak.getArea();
            final double peakHeight = peak.getHeight();
            final int peakDatapoints = peak.getScanNumbers().length;

            Double peakFWHM = peak.getFWHM();
            Double peakTailingFactor = peak.getTailingFactor();
            Double peakAsymmetryFactor = peak.getAsymmetryFactor();
            if (peakFWHM == null) {
                peakFWHM = -1.0;
            }
            if (peakTailingFactor == null) {
                peakTailingFactor = -1.0;
            }
            if (peakAsymmetryFactor == null) {
                peakAsymmetryFactor = -1.0;
            }

            // Check Duration
            if (filterByDuration) {
                final Range<Double> durationRange = parameters.getParameter(PeakFilterParameters.PEAK_DURATION)
                        .getEmbeddedParameter().getValue();
                if (!durationRange.contains(peakDuration)) {
                    // Mark peak to be removed
                    keepPeak[i] = false;
                }
            }

            // Check Area
            if (filterByArea) {
                final Range<Double> areaRange = parameters.getParameter(PeakFilterParameters.PEAK_AREA)
                        .getEmbeddedParameter().getValue();
                if (!areaRange.contains(peakArea)) {
                    // Mark peak to be removed
                    keepPeak[i] = false;
                }
            }

            // Check Height
            if (filterByHeight) {
                final Range<Double> heightRange = parameters.getParameter(PeakFilterParameters.PEAK_HEIGHT)
                        .getEmbeddedParameter().getValue();
                if (!heightRange.contains(peakHeight)) {
                    // Mark peak to be removed
                    keepPeak[i] = false;
                }
            }

            // Check # Data Points
            if (filterByDatapoints) {
                final Range<Integer> datapointsRange = parameters
                        .getParameter(PeakFilterParameters.PEAK_DATAPOINTS).getEmbeddedParameter().getValue();
                if (!datapointsRange.contains(peakDatapoints)) {
                    // Mark peak to be removed
                    keepPeak[i] = false;
                }
            }

            // Check FWHM
            if (filterByFWHM) {
                final Range<Double> fwhmRange = parameters.getParameter(PeakFilterParameters.PEAK_FWHM)
                        .getEmbeddedParameter().getValue();
                if (!fwhmRange.contains(peakFWHM)) {
                    // Mark peak to be removed
                    keepPeak[i] = false;
                }
            }

            // Check Tailing Factor
            if (filterByTailingFactor) {
                final Range<Double> tailingRange = parameters
                        .getParameter(PeakFilterParameters.PEAK_TAILINGFACTOR).getEmbeddedParameter()
                        .getValue();
                if (!tailingRange.contains(peakTailingFactor)) {
                    // Mark peak to be removed
                    keepPeak[i] = false;
                }
            }

            // Check Asymmetry Factor
            if (filterByAsymmetryFactor) {
                final Range<Double> asymmetryRange = parameters
                        .getParameter(PeakFilterParameters.PEAK_ASYMMETRYFACTOR).getEmbeddedParameter()
                        .getValue();
                if (!asymmetryRange.contains(peakAsymmetryFactor)) {
                    // Mark peak to be removed
                    keepPeak[i] = false;
                }
            }

        }

        newPeakList.addRow(copyPeakRow(row, keepPeak));

    }

    return newPeakList;
}
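
Every check in the filter above is the same test: a measured peak property must fall inside a user-supplied range. Since a Range can serve as a java.util.function.Predicate via a method reference to contains, those checks can be written compactly (the ranges and values here are invented, not the mzmine parameter API):

import com.google.common.collect.Range;

import java.util.function.Predicate;

public class PeakPropertyFilterDemo {
    public static void main(String[] args) {
        Predicate<Double> durationOk = Range.closed(0.1, 2.0)::contains;
        Predicate<Double> heightOk = Range.atLeast(1.0e4)::contains;

        double peakDuration = 0.8;
        double peakHeight = 5.0e3;

        boolean keepPeak = durationOk.test(peakDuration) && heightOk.test(peakHeight);
        System.out.println(keepPeak);   // false: the height is below the threshold
    }
}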

From source file:net.sf.mzmine.modules.peaklistmethods.alignment.ransac.RANSAC.java

/**
 * Takes the initial points randomly. The data is divided into parts based on the initial
 * number of points, and if the fractions contain enough points, one point is taken from
 * each part.
 * 
 * @param data
 *            vector with the points which represent all possible
 *            alignments.
 * @return false if there is any problem.
 */
private boolean getInitN(List<AlignStructMol> data) {
    if (data.size() > n) {
        Collections.sort(data, new AlignStructMol());
        double min = data.get(0).RT;
        double max = data.get(data.size() - 1).RT;

        Range<Double> rtRange = Range.closed(min, ((max - min) / 2) + min);

        int cont = 0, bucle = 0;
        while (cont < n / 2 && bucle < 1000) {
            int index = (int) (data.size() * Math.random());
            if (!data.get(index).ransacMaybeInLiers && rtRange.contains(data.get(index).RT)) {
                data.get(index).ransacMaybeInLiers = true;
                cont++;

            }

            bucle++;
        }
        if (bucle >= 1000) {
            getN(data, (n / 2) - cont);
        }

        bucle = 0;
        rtRange = Range.closed(((max - min) / 2) + min, max);

        while (cont < n && bucle < 1000) {

            int index = (int) (data.size() * Math.random());
            if (!data.get(index).ransacMaybeInLiers && rtRange.contains(data.get(index).RT)) {
                data.get(index).ransacMaybeInLiers = true;
                cont++;
            }
            bucle++;
        }
        if (bucle >= 1000) {
            getN(data, n - cont);
        }
        return true;
    } else {
        return false;
    }
}
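
The sampling above relies on two closed ranges that split the RT span into halves; with Range.closed the midpoint belongs to both halves. A tiny sketch of that split (plain doubles, no RANSAC bookkeeping):

import com.google.common.collect.Range;

public class HalfRangeDemo {
    public static void main(String[] args) {
        double min = 2.0;
        double max = 10.0;
        double mid = ((max - min) / 2) + min;   // 6.0

        Range<Double> lowerHalf = Range.closed(min, mid);
        Range<Double> upperHalf = Range.closed(mid, max);

        System.out.println(lowerHalf.contains(6.0));   // true
        System.out.println(upperHalf.contains(6.0));   // true: the midpoint is in both halves
        System.out.println(lowerHalf.contains(9.5));   // false
    }
}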