Example usage for org.apache.lucene.util CharsRefBuilder length

List of usage examples for org.apache.lucene.util CharsRefBuilder length

Introduction

In this page you can find the example usage for org.apache.lucene.util CharsRefBuilder length.

Prototype

public int length() 

Source Link

Document

Return the number of chars in this buffer.

Usage

From source file: org.apache.solr.schema.EnumFieldType.java

License:Apache License

@Override
public CharsRef indexedToReadable(BytesRef input, CharsRefBuilder output) {
    // Decode the sortable int stored in the index and map it back to the
    // human-readable enum string via the configured enum mapping.
    // NOTE(review): reads from offset 0 rather than input.offset — assumes
    // callers always pass offset-0 refs; verify against call sites.
    final Integer ordinal = NumericUtils.sortableBytesToInt(input.bytes, 0);
    final String readable = enumMapping.intValueToStringValue(ordinal);
    final int len = readable.length();
    // Size the builder to the string, then copy the chars straight into its
    // backing array to avoid an intermediate char[] allocation.
    output.grow(len);
    output.setLength(len);
    readable.getChars(0, len, output.chars(), 0);
    return output.get();
}

From source file: org.apache.solr.schema.PointField.java

License:Apache License

@Override
public CharsRef indexedToReadable(BytesRef indexedForm, CharsRefBuilder charsRef) {
    // Convert the indexed binary form to its readable string representation,
    // then copy that string into the caller-supplied reusable builder.
    final String readable = indexedToReadable(indexedForm);
    final int len = readable.length();
    charsRef.grow(len);
    charsRef.setLength(len);
    // Copy directly into the builder's backing array; length was just set to
    // match the string, so the whole string fits.
    readable.getChars(0, len, charsRef.chars(), 0);
    return charsRef.get();
}

From source file: org.codelibs.elasticsearch.search.suggest.phrase.NoisyChannelSpellChecker.java

License:Apache License

/**
 * Analyzes the given UTF-8 query bytes with the supplied analyzer.
 *
 * <p>The query bytes are decoded into the reusable {@code spare} buffer and
 * fed to the analyzer through a char-array reader, avoiding an intermediate
 * String allocation.
 */
public TokenStream tokenStream(Analyzer analyzer, BytesRef query, CharsRefBuilder spare, String field)
        throws IOException {
    spare.copyUTF8Bytes(query);
    final FastCharArrayReader queryReader = new FastCharArrayReader(spare.chars(), 0, spare.length());
    return analyzer.tokenStream(field, queryReader);
}

From source file: org.elasticsearch.search.suggest.completion.old.CompletionSuggester.java

License:Apache License

@Override
protected Suggest.Suggestion<? extends Suggest.Suggestion.Entry<? extends Suggest.Suggestion.Entry.Option>> innerExecute(
        String name, CompletionSuggestionContext suggestionContext, IndexSearcher searcher,
        CharsRefBuilder spare) throws IOException {
    // Fail fast when the target field is not mapped as a completion field;
    // the postings-format lookup below only exists for such fields.
    if (suggestionContext.fieldType() == null) {
        throw new ElasticsearchException(
                "Field [" + suggestionContext.getField() + "] is not a completion suggest field");
    }
    final IndexReader indexReader = searcher.getIndexReader();
    CompletionSuggestion completionSuggestion = new CompletionSuggestion(name, suggestionContext.getSize());
    // Decode the UTF-8 suggest text into the reusable char buffer.
    spare.copyUTF8Bytes(suggestionContext.getText());

    // One entry per suggest text, spanning the whole input.
    CompletionSuggestion.Entry completionSuggestEntry = new CompletionSuggestion.Entry(
            new StringText(spare.toString()), 0, spare.length());
    completionSuggestion.addTerm(completionSuggestEntry);

    String fieldName = suggestionContext.getField();
    // De-duplicate suggestions across segments by surface form, keeping the
    // best score (and its payload) per key.
    Map<String, CompletionSuggestion.Entry.Option> results = Maps
            .newHashMapWithExpectedSize(indexReader.leaves().size() * suggestionContext.getSize());
    for (LeafReaderContext atomicReaderContext : indexReader.leaves()) {
        LeafReader atomicReader = atomicReaderContext.reader();
        Terms terms = atomicReader.fields().terms(fieldName);
        // Only segments written with the completion postings format expose a lookup.
        if (terms instanceof Completion090PostingsFormat.CompletionTerms) {
            final Completion090PostingsFormat.CompletionTerms lookupTerms = (Completion090PostingsFormat.CompletionTerms) terms;
            final Lookup lookup = lookupTerms.getLookup(suggestionContext.fieldType(), suggestionContext);
            if (lookup == null) {
                // we don't have a lookup for this segment.. this might be possible if a merge dropped all
                // docs from the segment that had a value in this segment.
                continue;
            }
            List<Lookup.LookupResult> lookupResults = lookup.lookup(spare.get(), false,
                    suggestionContext.getSize());
            for (Lookup.LookupResult res : lookupResults) {

                final String key = res.key.toString();
                final float score = res.value;
                final CompletionSuggestion.Entry.Option value = results.get(key);
                if (value == null) {
                    // First sighting of this surface form: record it as-is.
                    final CompletionSuggestion.Entry.Option option = new CompletionSuggestion.Entry.Option(
                            new StringText(key), score,
                            res.payload == null ? null : new BytesArray(res.payload));
                    results.put(key, option);
                } else if (value.getScore() < score) {
                    // Seen before with a lower score: upgrade score and payload in place.
                    value.setScore(score);
                    value.setPayload(res.payload == null ? null : new BytesArray(res.payload));
                }
            }
        }
    }
    // Sort all collected options by score and emit only the top `size`.
    final List<CompletionSuggestion.Entry.Option> options = new ArrayList<>(results.values());
    CollectionUtil.introSort(options, scoreComparator);

    int optionCount = Math.min(suggestionContext.getSize(), options.size());
    for (int i = 0; i < optionCount; i++) {
        completionSuggestEntry.addOption(options.get(i));
    }

    return completionSuggestion;
}

From source file: org.elasticsearch.search.suggest.filteredsuggest.FilteredSuggestSuggester.java

License:Apache License

@Override
protected Suggest.Suggestion<? extends Suggest.Suggestion.Entry<? extends Suggest.Suggestion.Entry.Option>> innerExecute(
        String name, final FilteredSuggestSuggestionContext suggestionContext, final IndexSearcher searcher,
        CharsRefBuilder spare) throws IOException {
    // Not a filtered-suggest field: nothing to suggest.
    if (suggestionContext.getFieldType() == null) {
        return null;
    }
    FilteredSuggestSuggestion completionSuggestion = new FilteredSuggestSuggestion(name,
            suggestionContext.getSize());
    // Decode the UTF-8 suggest text into the reusable char buffer.
    spare.copyUTF8Bytes(suggestionContext.getText());
    Map<String, FilteredSuggestSuggestion.Entry.Option> results = new HashMap<>(
            suggestionContext.getSize());

    // One entry per suggest text, spanning the whole input.
    FilteredSuggestSuggestion.Entry completionSuggestEntry = new FilteredSuggestSuggestion.Entry(
            new Text(spare.toString()), 0, spare.length());
    completionSuggestion.addTerm(completionSuggestEntry);

    // TODO scoring: scoring is done per query because the per-query results
    // must be intersected; we pick the top n per query but cannot score
    // across filters.

    // Keys shared by every query so far; null until the first query runs.
    Set<String> finalKeySet = null;
    // Keys produced by the current query (reused across iterations).
    Set<String> keys = new HashSet<>();
    for (CompletionQuery compQuery : suggestionContext.toQueries()) {
        TopSuggestDocsCollector collector = new TopDocumentsCollector(suggestionContext.getSize());
        CompletionSuggester.suggest(searcher, compQuery, collector);

        keys.clear();
        for (TopSuggestDocs.SuggestScoreDoc suggestScoreDoc : collector.get().scoreLookupDocs()) {
            TopDocumentsCollector.SuggestDoc suggestDoc = (TopDocumentsCollector.SuggestDoc) suggestScoreDoc;
            for (CharSequence matchedKey : suggestDoc.getKeys()) {
                final String key = matchedKey.toString();
                final float score = suggestDoc.score;
                keys.add(key);
                // De-duplicate by surface form, keeping the highest-scoring option.
                final FilteredSuggestSuggestion.Entry.Option existing = results.get(key);
                if (existing == null || existing.getScore() < score) {
                    results.put(key, new FilteredSuggestSuggestion.Entry.Option(suggestDoc.doc,
                            new Text(key), suggestDoc.score));
                }
            }
        }

        // Across filters this is an AND-like operation: retain only keys
        // that appear in the results of every query.
        if (finalKeySet == null) {
            finalKeySet = new HashSet<>(keys);
        } else {
            finalKeySet.retainAll(keys);
        }
    }

    // Retain only the filter-level intersected results. Guard against NPE
    // when toQueries() yielded no queries at all (finalKeySet stays null and
    // results is necessarily empty).
    if (finalKeySet != null) {
        results.keySet().retainAll(finalKeySet);
    }

    // Sort surviving options by score and emit only the top `size`.
    final List<FilteredSuggestSuggestion.Entry.Option> options = new ArrayList<>(results.values());
    CollectionUtil.introSort(options, Suggest.COMPARATOR);

    int optionCount = Math.min(suggestionContext.getSize(), options.size());
    for (int i = 0; i < optionCount; i++) {
        completionSuggestEntry.addOption(options.get(i));
    }
    return completionSuggestion;
}