Example usage for com.google.common.collect RangeMap get

List of usage examples for com.google.common.collect RangeMap get

Introduction

On this page you can find example usage for com.google.common.collect RangeMap get.

Prototype

@Nullable
V get(K key);

Source Link

Document

Returns the value associated with the specified key, or null if there is no such value.

Usage

From source file:org.apache.niolex.common.guava.GuavaCollections.java

/**
 * Demonstrates several Guava collection types: {@code Multiset} for
 * counting duplicates, {@code BiMap} for bidirectional lookup,
 * {@code RangeMap} for mapping key ranges to values, and the
 * {@code Ints}/{@code Lists} helpers.
 *
 * @param args command-line arguments (unused)
 */
public static void main(String[] args) {
    // A Multiset keeps a count per distinct element.
    Multiset<String> words = HashMultiset.create();
    words.add("abc");
    words.add("abc");
    words.add("abcd");
    System.out.println("count => " + words.count("abc"));
    System.out.println("count => " + words.count("abcd"));

    // A BiMap can be queried in both directions via inverse().
    BiMap<String, String> greetings = HashBiMap.create();
    greetings.put("good", "morning");
    greetings.put("bad", "afternoon");
    System.out.println("good => " + greetings.get("good"));
    System.out.println("afternoon => " + greetings.inverse().get("afternoon"));

    // A RangeMap resolves a point query to the value of the covering range.
    RangeMap<Integer, String> ranges = TreeRangeMap.create();
    ranges.put(Range.closed(1, 11), "Nice");
    ranges.put(Range.openClosed(11, 15), "Girl");
    System.out.println("11 => " + ranges.get(11));
    System.out.println("12 => " + ranges.get(12));
    System.out.println("15 => " + ranges.get(15));
    System.out.println("16 => " + ranges.get(16));

    // Lists.reverse returns a reversed view of the backing list.
    List<Integer> ascending = Ints.asList(1, 2, 3, 4, 5);
    List<Integer> descending = Lists.reverse(ascending); // {5, 4, 3, 2, 1}
    System.out.println("countUp => " + ascending);
    System.out.println("countDown => " + descending);
}

From source file:org.sosy_lab.cpachecker.cfa.CSourceOriginMapping.java

/**
 * Maps a line of the analysis (pre-processed) code back to its origin
 * file and line number.
 *
 * <p>The per-file {@code RangeMap} stores, for each line range, the
 * origin file name together with a line-number delta that is added to
 * the queried line. When no mapping covers the requested file or line,
 * the input pair is returned unchanged.
 *
 * @param analysisFile     file name as seen by the analysis
 * @param analysisCodeLine line number within the analysis code
 * @return pair of origin file name and origin line number
 */
public Pair<String, Integer> getOriginLineFromAnalysisCodeLine(String analysisFile, int analysisCodeLine) {
    RangeMap<Integer, Pair<String, Integer>> fileMapping = mapping.get(analysisFile);
    if (fileMapping == null) {
        // no mapping recorded for this file
        return Pair.of(analysisFile, analysisCodeLine);
    }

    Pair<String, Integer> fileAndDelta = fileMapping.get(analysisCodeLine);
    if (fileAndDelta == null) {
        // line not covered by any recorded range
        return Pair.of(analysisFile, analysisCodeLine);
    }

    return Pair.of(fileAndDelta.getFirst(), analysisCodeLine + fileAndDelta.getSecond());
}

From source file:io.blobkeeper.cluster.service.ReplicationClientServiceImpl.java

/**
 * Replicates the differing parts of a partition's blob file to the
 * destination node. Each index element that falls inside a differing
 * range is read from the local blob file and sent to {@code dst};
 * after every {@code getReplicationMaxFiles()} sent elements the loop
 * pauses for {@code getReplicationDelay()} to throttle replication.
 *
 * @param differenceInfo which disk/partition differs and the differing ranges
 * @param dst            address of the cluster node to replicate to
 */
@Override
public void replicate(@NotNull DifferenceInfo differenceInfo, @NotNull Address dst) {
    replicationStatistic.onReplicationRequest();

    Partition partition = partitionService.getById(differenceInfo.getDisk(), differenceInfo.getPartition());

    if (!isReplicationAvailable(partition, differenceInfo)) {
        return;
    }

    // TODO: calculate what types are different instead whole range
    // Index the differing ranges so each element id can be tested quickly.
    RangeMap<Long, LeafNode> nodes = TreeRangeMap.create();
    differenceInfo.getDifference().forEach(diff -> nodes.put(diff.getRange(), diff));

    log.info("File will be synced {}, dst node {}", differenceInfo, dst);

    File file = null;
    int sentElts = 1;
    try {
        file = fileListService.getFile(differenceInfo.getDisk(), differenceInfo.getPartition());
        if (null == file) {
            log.error("Can't replicate blob file {}, dst node {}", differenceInfo, dst);
            return;
        }

        List<IndexElt> elts = new ArrayList<>(indexService.getListByPartition(partition));

        // sort it by offset, to read file consequentially
        sort(elts, new IndexEltOffsetComparator());

        for (IndexElt elt : elts) {
            // skip elements outside the difference, unless everything differs
            if (null == nodes.get(elt.getId()) && !differenceInfo.isCompletelyDifferent()) {
                continue;
            }

            // throttle: pause after each batch of replicated files
            if (sentElts % configuration.getReplicationMaxFiles() == 0) {
                sleep(configuration.getReplicationDelay());
            }

            ByteBuffer buffer;
            try {
                buffer = FileUtils.readFile(file, elt.getOffset(), elt.getLength());
            } catch (Exception e) {
                log.error("Can't read data for index {}, required {}", elt, elt.getLength(), e);
                continue;
            }

            byte[] bufferBytes = new byte[buffer.remaining()];
            buffer.get(bufferBytes);

            // short read: skip rather than replicate truncated data
            if (bufferBytes.length < elt.getLength()) {
                log.error("Can't replicate elt {}", elt);
                continue;
            }

            ReplicationFile replicationFile = new ReplicationFile(elt.getDiskIndexElt(), bufferBytes);
            try {
                replicate(replicationFile, dst);
            } catch (ReplicationServiceException e) {
                log.error("Can't replicate file {}", elt, e);
            }

            // BUGFIX: the counter was never incremented, so the throttling
            // pause above could only ever fire when the batch size was 1.
            sentElts++;
        }
    } catch (Exception e) {
        log.error("Can't replicate block {}", partition, e);
    } finally {
        // best-effort close of the blob file
        if (null != file) {
            try {
                file.close();
            } catch (Exception ignored) {
                // nothing useful to do if close fails
            }
        }
    }
}

From source file:org.corpus_tools.salt.common.tokenizer.Tokenizer.java

/**
 * The general task of this class is to tokenize a given text in the same
 * order as the tool TreeTagger will do. A list of tokenized text is
 * returned with the text anchor (start and end position) in original text.
 * If the {@link SDocumentGraph} already contains tokens, the tokens will be
 * preserved, if they overlap the same textual range as the new one.
 * Otherwise a {@link SSpan} is created covering corresponding to the
 * existing token. The span than overlaps all new tokens and contains all
 * annotations the old token did. In case, the span would overlaps the same
 * textual range as the old token did, no span is created.
 * /*from   w w  w .  java 2  s .  c  o  m*/
 * @param strInput
 *            original text
 * @return tokenized text fragments and their position in the original text
 */
public List<SToken> tokenizeToToken(STextualDS sTextualDS, LanguageCode language, Integer startPos,
        Integer endPos) {
    List<SToken> retVal = null;
    List<String> strTokens = null;
    String strInput = sTextualDS.getText().substring(startPos, endPos);

    strTokens = tokenizeToString(strInput, language);
    if (strTokens.size() > 0) {
        char[] chrText = strInput.toCharArray();
        int tokenCntr = 0;

        // check if tokens exist for passed span
        List<SToken> tokens = null;
        if ((startPos != 0) || (endPos != sTextualDS.getText().length())
                || (getDocumentGraph().getTextualDSs().size() > 1)) {
            DataSourceSequence sequence = new DataSourceSequence();
            sequence.setDataSource(sTextualDS);
            sequence.setStart(startPos);
            sequence.setEnd(endPos);
            tokens = getDocumentGraph().getTokensBySequence(sequence);
        } else {
            tokens = getDocumentGraph().getTokens();
        }

        RangeMap<Integer, SToken> oldTokens = null;
        // create an organization structure for a tokens interval which
        // corresponds to a token
        if ((tokens != null) && (tokens.size() != 0)) {
            if ((getDocumentGraph().getTextualRelations() != null)
                    && (getDocumentGraph().getTextualRelations().size() > 0)) {
                oldTokens = TreeRangeMap.create();
                for (STextualRelation rel : getDocumentGraph().getTextualRelations()) {
                    oldTokens.put(Range.closed(rel.getStart(), rel.getEnd()), rel.getSource());
                }
            }
        }
        // a map mapping new created tokens, to old already existing tokens.
        // The old tokens should be removed later on and spans should be
        // created instead
        Multimap<SToken, SToken> old2newToken = ArrayListMultimap.create();

        for (int i = 0; i < chrText.length; i++) {
            if ((strTokens.get(tokenCntr).length() < 1)
                    || (strTokens.get(tokenCntr).substring(0, 1).equals(String.valueOf(chrText[i])))) {
                // first letter matches
                StringBuffer pattern = new StringBuffer();
                for (int y = 0; y < strTokens.get(tokenCntr).length(); y++) {
                    // compute pattern in text
                    pattern.append(chrText[i + y]);
                } // compute pattern in text
                if (strTokens.get(tokenCntr).hashCode() == pattern.toString().hashCode()) {
                    // pattern found
                    int start = i + startPos;
                    int end = i + startPos + strTokens.get(tokenCntr).length();

                    if (this.getDocumentGraph() == null) {
                        throw new SaltTokenizerException(
                                "Cannot add tokens to an empty SDocumentGraph object.");
                    }

                    SToken sTok = this.getDocumentGraph().createToken(sTextualDS, start, end);
                    if (retVal == null) {
                        retVal = new ArrayList<SToken>();
                    }
                    retVal.add(sTok);
                    i = i + strTokens.get(tokenCntr).length() - 1;
                    tokenCntr++;
                    if (tokenCntr >= strTokens.size()) {
                        break;
                    }

                    /**
                     * check, if there is an old token, overlapping the same
                     * or a bigger span as the currently created one. If
                     * yes, remove the old one and create a span overlapping
                     * the new one.
                     **/
                    if (oldTokens != null) {
                        SToken oldToken = oldTokens.get(start);
                        if (oldToken != null) {
                            old2newToken.put(oldToken, sTok);
                        }
                    }

                } // pattern found
            } // first letter matches
        }

        if (old2newToken != null) {
            for (SToken oldToken : old2newToken.keySet()) {
                // create span for oldToken
                List<SToken> overlappedTokens = new ArrayList<SToken>(old2newToken.get(oldToken));
                if (overlappedTokens.size() == 1) {
                    getDocumentGraph().removeNode(overlappedTokens.get(0));
                } else {

                    SSpan span = getDocumentGraph().createSpan(overlappedTokens);

                    // move all annotations from old token to span
                    for (SAnnotation sAnno : oldToken.getAnnotations()) {
                        span.addAnnotation(sAnno);
                    }

                    // redirect all relations to span
                    List<SRelation<SNode, SNode>> inRels = new ArrayList<>();
                    for (SRelation rel : getDocumentGraph().getInRelations(oldToken.getId())) {
                        inRels.add(rel);
                    }
                    for (SRelation inRel : inRels) {
                        if (inRel instanceof SSpanningRelation) {
                            // in case of edge is a SSpanningRelation remove
                            // it and create new ones for each token under
                            // the span
                            if (inRel.getSource() instanceof SSpan) {
                                SSpan parentSpan = (SSpan) inRel.getSource();
                                getDocumentGraph().removeRelation(inRel);
                                for (SToken overlappedToken : overlappedTokens) {
                                    SSpanningRelation rel = SaltFactory.createSSpanningRelation();
                                    rel.setSource(parentSpan);
                                    rel.setTarget(overlappedToken);
                                    getDocumentGraph().addRelation(rel);
                                }
                            }
                        } else {
                            inRel.setTarget(span);
                        }
                    }
                    List<SRelation<SNode, SNode>> outRels = new ArrayList<>();
                    for (SRelation outRel : getDocumentGraph().getOutRelations(oldToken.getId())) {
                        if (!(outRel instanceof STextualRelation)) {
                            outRels.add(outRel);
                        }
                    }
                    for (SRelation outRel : outRels) {
                        outRel.setSource(span);
                    }
                    getDocumentGraph().removeNode(oldToken);
                }
            }
        }
    }
    return (retVal);
}