Example usage for org.apache.lucene.search.spans SpanMultiTermQueryWrapper SpanMultiTermQueryWrapper

List of usage examples for org.apache.lucene.search.spans SpanMultiTermQueryWrapper SpanMultiTermQueryWrapper

Introduction

In this page you can find the example usage for org.apache.lucene.search.spans SpanMultiTermQueryWrapper SpanMultiTermQueryWrapper.

Prototype

@SuppressWarnings({ "rawtypes", "unchecked" })
public SpanMultiTermQueryWrapper(Q query) 

Source Link

Document

Create a new SpanMultiTermQueryWrapper.

Usage

From source file: brightsolid.solr.plugins.TestTargetPositionQueryFuzzy.java

License: Apache License

/**
 * Verifies that a FuzzyQuery wrapped in a SpanMultiTermQueryWrapper combined with
 * SpanTargetPositionQuery matches the fuzzy term at target position 1, returning the
 * expected top document and all three indexed documents as hits.
 */
public void testTargetPositionFuzzy() throws Exception {
    FuzzyQuery fq = new FuzzyQuery(new Term("field", "three"));
    SpanQuery stq = new SpanMultiTermQueryWrapper<FuzzyQuery>(fq);
    SpanQuery tpq = new SpanTargetPositionQuery(stq, 1);
    TopDocs td = searcher.search(tpq, 10);

    // JUnit convention: expected value first, actual second (was reversed).
    assertEquals("two threx one", fieldValue(td, 0));
    assertEquals(3, td.totalHits);
}

From source file: brightsolid.solr.plugins.TestTargetPositionQueryFuzzy.java

License: Apache License

/**
 * Verifies that a PrefixQuery wrapped in a SpanMultiTermQueryWrapper combined with
 * SpanTargetPositionQuery matches a "tw"-prefixed term at target position 2,
 * returning the expected top document and all three indexed documents as hits.
 */
public void testTargetPositionPrefix() throws Exception {
    PrefixQuery fq = new PrefixQuery(new Term("field", "tw"));
    SpanQuery stq = new SpanMultiTermQueryWrapper<PrefixQuery>(fq);
    SpanQuery tpq = new SpanTargetPositionQuery(stq, 2);
    TopDocs td = searcher.search(tpq, 10);

    // JUnit convention: expected value first, actual second (was reversed).
    assertEquals("threx one twp", fieldValue(td, 0));
    assertEquals(3, td.totalHits);
}

From source file: brightsolid.solr.plugins.TestTargetPositionQueryFuzzy.java

License: Apache License

/**
 * Verifies that a WildcardQuery wrapped in a SpanMultiTermQueryWrapper combined with
 * SpanTargetPositionQuery matches a "tw*" term at target position 2, returning the
 * expected top document and all three indexed documents as hits.
 */
public void testTargetPositionWildcard() throws Exception {
    WildcardQuery fq = new WildcardQuery(new Term("field", "tw*"));
    SpanQuery stq = new SpanMultiTermQueryWrapper<WildcardQuery>(fq);
    SpanQuery tpq = new SpanTargetPositionQuery(stq, 2);
    TopDocs td = searcher.search(tpq, 10);

    // JUnit convention: expected value first, actual second (was reversed).
    assertEquals("threx one twp", fieldValue(td, 0));
    assertEquals(3, td.totalHits);
}

From source file: de.faustedition.genesis.lines.VerseManager.java

License: Open Source License

/**
 * Runs a fuzzy full-text search over the verse fulltext index: the query string is
 * normalized, split on single spaces, each token wrapped as a fuzzy span clause, and
 * the clauses combined into a SpanNearQuery (slop 5, unordered). Matching index nodes
 * are lazily wrapped as {@code LayerNode}s.
 *
 * @param queryString the raw user query; tokens are separated by single spaces
 * @return a lazy iterable of layer nodes matching the fuzzy near query
 */
public Iterable<LayerNode<JsonNode>> fulltextQuery(String queryString) {
    Preconditions.checkState(this.faustGraph.getDb().index().existsForNodes(INDEX_VERSE_FULLTEXT));
    Index<Node> verseFulltextIndex = this.faustGraph.getDb().index().forNodes(INDEX_VERSE_FULLTEXT);

    // Construct a fuzzy query per whitespace-separated token.
    String normalizedQueryString = Normalization.normalize(queryString);
    ArrayList<String> tokens = Lists.newArrayList(normalizedQueryString.split(" "));
    // Parameterized (was raw) to eliminate unchecked-generics warnings.
    Function<String, SpanMultiTermQueryWrapper<FuzzyQuery>> stringToQuery = new Function<String, SpanMultiTermQueryWrapper<FuzzyQuery>>() {
        @Override
        public SpanMultiTermQueryWrapper<FuzzyQuery> apply(@Nullable String input) {
            // NOTE(review): toLowerCase() uses the default locale — presumably acceptable
            // for this corpus, but confirm against the index analyzer's casing.
            Term term = new Term("fulltext", input.toLowerCase());
            FuzzyQuery fuzzyQuery = new FuzzyQuery(term);
            return new SpanMultiTermQueryWrapper<FuzzyQuery>(fuzzyQuery);
        }
    };

    // Unbounded-wildcard array element type is reifiable, so this stays warning-free.
    SpanMultiTermQueryWrapper<?>[] clauses = Lists.newArrayList(Iterables.transform(tokens, stringToQuery))
            .toArray(new SpanMultiTermQueryWrapper<?>[tokens.size()]);

    // Tokens must occur within 5 positions of each other, in any order.
    SpanNearQuery query = new SpanNearQuery(clauses, 5, false);
    IndexHits<Node> verseResults = verseFulltextIndex.query(query);

    Function<Node, LayerNode<JsonNode>> wrapLayerNodes = new Function<Node, LayerNode<JsonNode>>() {
        @Override
        public LayerNode apply(@Nullable Node input) {
            return new LayerNode<JsonNode>(textRepository, input);
        }
    };

    return Iterables.transform(verseResults, wrapLayerNodes);
}

From source file: de.mirkosertic.desktopsearch.QueryParser.java

License: Open Source License

/**
 * Builds a Lucene boolean query from the raw query string. Required terms are combined
 * as span clauses: an exact in-order match gets the highest boost, progressively
 * sloppier unordered near-matches get smaller boosts, and plain unboosted term queries
 * guarantee the terms occur at all. Excluded terms are added as MUST_NOT clauses.
 *
 * @param aQuery       the raw user query string
 * @param aSearchField the index field to search
 * @return the assembled boolean query
 * @throws IOException if tokenizing a term through the analyzer fails
 */
public Query parse(String aQuery, String aSearchField) throws IOException {

    QueryTokenizer tokenizer = new QueryTokenizer(aQuery);

    BooleanQuery result = new BooleanQuery();

    if (!tokenizer.getRequiredTerms().isEmpty()) {

        // Turn every required term into a span clause, honoring wildcard/fuzzy syntax.
        List<SpanQuery> spanClauses = new ArrayList<>();
        for (String requiredTerm : tokenizer.getRequiredTerms()) {
            if (QueryUtils.isWildCard(requiredTerm)) {
                spanClauses.add(
                        new SpanMultiTermQueryWrapper<>(new WildcardQuery(new Term(aSearchField, requiredTerm))));
            } else if (QueryUtils.isFuzzy(requiredTerm)) {
                spanClauses.add(
                        new SpanMultiTermQueryWrapper<>(new FuzzyQuery(new Term(aSearchField, requiredTerm))));
            } else {
                // The analyzer may remove the token entirely (stopwords and the like).
                String analyzedTerm = toToken(requiredTerm, aSearchField);
                if (!StringUtils.isEmpty(analyzedTerm)) {
                    spanClauses.add(new SpanTermQuery(new Term(aSearchField, analyzedTerm)));
                }
            }
        }

        // The exact in-order phrase match — boosted above everything else.
        SpanQuery exactMatch = new SpanNearQuery(spanClauses.toArray(new SpanQuery[spanClauses.size()]), 0,
                true);
        exactMatch.setBoost(61);
        result.add(exactMatch, BooleanClause.Occur.SHOULD);

        // Allow up to 10 positions between terms in any order; closer matches
        // receive a higher boost. 10 was found to be the most useful ceiling.
        int maxEditDistance = 10;
        for (int slop = 0; slop < maxEditDistance; slop++) {
            SpanQuery nearMatch = new SpanNearQuery(spanClauses.toArray(new SpanQuery[spanClauses.size()]),
                    slop, false);
            nearMatch.setBoost(50 + maxEditDistance - slop);
            result.add(nearMatch, BooleanClause.Occur.SHOULD);
        }

        // Unboosted term queries ensure the required terms appear in the document.
        addToBooleanQuery(tokenizer.getRequiredTerms(), aSearchField, result, BooleanClause.Occur.MUST);
    }

    // Terms that must not occur in the result set.
    addToBooleanQuery(tokenizer.getNotRequiredTerms(), aSearchField, result,
            BooleanClause.Occur.MUST_NOT);

    return result;
}

From source file: de.mirkosertic.desktopsearch.SearchPhraseSuggester.java

License: Open Source License

/**
 * Suggests complete search phrases for a partially typed phrase. The phrase is
 * tokenized, turned into a SpanNearQuery, and every matching span in the index is
 * expanded by a configurable window of surrounding terms (taken from the document's
 * term vector). Identical expanded windows are counted; windows seen more than once
 * are returned as suggestions, most frequent first.
 *
 * @param aFieldName the index field the phrase is matched against
 * @param aPhrase    the (partial) phrase typed by the user
 * @return suggestions ordered by descending frequency, capped at the configured limit
 * @throws IOException if index access fails
 */
public List<Suggestion> suggestSearchPhrase(String aFieldName, String aPhrase) throws IOException {

    LOGGER.info("Trying to find suggestions for phrase " + aPhrase);

    long theStartTime = System.currentTimeMillis();
    try {
        List<String> theTokens = toTokens(aFieldName, aPhrase);

        // One span clause per token; wildcard tokens are rewritten eagerly so the
        // span query only contains concrete terms.
        // NOTE(review): SpanMultiTermQueryWrapper is used as a raw type here —
        // parameterizing it would remove the unchecked warning; verify before changing.
        List<SpanQuery> theSpanQueries = theTokens.stream().map(s -> {
            if (QueryUtils.isWildCard(s)) {
                WildcardQuery theWildcardQuery = new WildcardQuery(new Term(aFieldName, s));
                SpanMultiTermQueryWrapper theWrapper = new SpanMultiTermQueryWrapper(theWildcardQuery);
                try {
                    return theWrapper.getRewriteMethod().rewrite(indexReader, theWildcardQuery);
                } catch (IOException e) {
                    // Streams cannot propagate checked exceptions; rethrow unchecked.
                    throw new RuntimeException(e);
                }
            }
            return new SpanTermQuery(new Term(aFieldName, s));
        }).collect(Collectors.toList());

        // Slop and ordering come from the suggester configuration.
        SpanQuery theSpanQuery = new SpanNearQuery(theSpanQueries.toArray(new SpanQuery[theSpanQueries.size()]),
                configuration.getSuggestionSlop(), configuration.isSuggestionInOrder());

        LOGGER.info("created span query " + theSpanQuery);

        // getSpans() requires an atomic (leaf) reader view of the composite index.
        LeafReader theAtomicReader = SlowCompositeReaderWrapper.wrap(indexReader);

        Map<Term, TermContext> theTermContexts = new HashMap<>();
        // Maps an expanded phrase window to the number of times it was seen.
        Map<String, Long> theSpanFrequencies = new HashMap<>();

        // These are all the matching spans over all documents
        Spans theMatchingSpans = theSpanQuery.getSpans(theAtomicReader.getContext(),
                new Bits.MatchAllBits(indexReader.numDocs()), theTermContexts);

        while (theMatchingSpans.next()) {

            // This maps the position of a term and the term string itself
            // the positions must be in order, so we have to use a treemap.
            Map<Integer, String> theEntries = new TreeMap<>();

            // Scan the document's term vector for terms whose positions fall
            // inside the span, widened by the configured before/after window.
            Terms theAllTermsFromDocument = indexReader.getTermVector(theMatchingSpans.doc(),
                    IndexFields.CONTENT_NOT_STEMMED);
            int theSpanStart = theMatchingSpans.start() - configuration.getSuggestionWindowBefore();
            int theSpanEnd = theMatchingSpans.end() + configuration.getSuggestionWindowAfter();
            TermsEnum theTermsEnum = theAllTermsFromDocument.iterator(null);
            BytesRef theTerm;
            while ((theTerm = theTermsEnum.next()) != null) {
                DocsAndPositionsEnum thePositionEnum = theTermsEnum.docsAndPositions(null, null);
                if (thePositionEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
                    int i = 0;
                    int position;
                    // Collect every occurrence of this term that lies in the window.
                    while (i < thePositionEnum.freq() && (position = thePositionEnum.nextPosition()) != -1) {
                        if (position >= theSpanStart && position <= theSpanEnd) {
                            theEntries.put(position, theTerm.utf8ToString());
                        }
                        i++;
                    }
                }
            }

            // Rebuild the window text in position order, space-separated.
            StringBuilder theResultString = new StringBuilder();
            theEntries.entrySet().forEach(e -> {
                if (theResultString.length() > 0) {
                    theResultString.append(" ");
                }
                theResultString.append(e.getValue());
            });

            String theTotalSpan = theResultString.toString().trim();

            // Count how often each reconstructed window occurs across all spans.
            Long theFrequency = theSpanFrequencies.get(theTotalSpan);
            if (theFrequency == null) {
                theSpanFrequencies.put(theTotalSpan, 1L);
            } else {
                theSpanFrequencies.put(theTotalSpan, theFrequency + 1);
            }
        }

        // Keep only windows seen more than once, most frequent first, limited to the
        // configured suggestion count; highlight the user's tokens in each suggestion.
        return theSpanFrequencies.entrySet().stream().filter(t -> t.getValue() > 1)
                .sorted((o1, o2) -> o2.getValue().compareTo(o1.getValue()))
                .limit(configuration.getNumberOfSuggestions())
                .map(T -> new Suggestion(highlight(T.getKey(), theTokens), T.getKey()))
                .collect(Collectors.toList());
    } finally {
        long theDuration = System.currentTimeMillis() - theStartTime;
        LOGGER.info("Took " + theDuration + "ms");
    }
}

From source file: es.ua.labidiomas.corpus.searcher.Searcher.java

private BooleanQuery _prepareLetterQuery(SearchConfiguration params, SpanQuery query) throws ParseException {
    BooleanQuery searchQuery = new BooleanQuery();

    SpanQuery prefixQUery = new SpanMultiTermQueryWrapper(
            new PrefixQuery(new Term("text", params.getSort().getLetter())));

    SpanNearQuery spanNear1 = new SpanNearQuery(new SpanQuery[] { query, prefixQUery },
            params.getSort().getPosition() - 1, true);

    if (params.getSort().getPosition() != 1) {
        SpanNearQuery spanNear2 = new SpanNearQuery(new SpanQuery[] { query, prefixQUery },
                params.getSort().getPosition() - 2, true);

        SpanNotQuery textQUery = new SpanNotQuery(spanNear1, spanNear2);

        searchQuery.add(textQUery, BooleanClause.Occur.MUST);
    } else {//from  w  ww.  j  a  v  a2s. c  o m
        searchQuery.add(spanNear1, BooleanClause.Occur.MUST);
    }

    searchQuery.add(_prepareDiscourseQuery(params), BooleanClause.Occur.MUST);

    return searchQuery;
}

From source file: io.vertigo.dynamo.plugins.collections.lucene.RamLuceneQueryFactory.java

License: Apache License

/**
 * Tokenizes the keyword string with the supplied analyzer and builds a boolean query
 * where every token must match as a prefix (MUST); a token matching in the first
 * position of the field additionally boosts the document via a SHOULD span-first clause.
 *
 * @param queryAnalyser the analyzer used to tokenize the keywords
 * @param fieldName     the index field the query targets
 * @param keywords      the raw keyword string
 * @return the assembled boolean query
 * @throws IOException if the token stream fails
 */
private static Query createParsedKeywordsQuery(final Analyzer queryAnalyser, final String fieldName,
        final String keywords) throws IOException {
    final Builder queryBuilder = new BooleanQuery.Builder();
    final Reader reader = new StringReader(keywords);
    try (final TokenStream tokenStream = queryAnalyser.tokenStream(fieldName, reader)) {
        tokenStream.reset();
        try {
            final CharTermAttribute termAttribute = tokenStream.getAttribute(CharTermAttribute.class);
            while (tokenStream.incrementToken()) {
                final String term = new String(termAttribute.buffer(), 0, termAttribute.length());
                // Every analyzed token must match as a prefix somewhere in the field.
                final PrefixQuery prefixQuery = new PrefixQuery(new Term(fieldName, term));
                queryBuilder.add(prefixQuery, BooleanClause.Occur.MUST);
                // Renamed from the misleading "spanSecondQuery": SpanFirstQuery(..., 1)
                // matches the prefix only in the field's first token position.
                final SpanFirstQuery spanFirstQuery = new SpanFirstQuery(
                        new SpanMultiTermQueryWrapper<>(prefixQuery), 1);
                queryBuilder.add(spanFirstQuery, BooleanClause.Occur.SHOULD);
            }
        } finally {
            reader.reset();
            tokenStream.end();
        }
    }
    return queryBuilder.build();
}

From source file: org.alfresco.solr.query.Solr4QueryParser.java

License: Open Source License

@SuppressWarnings("unchecked")
protected Query getFieldQueryImpl(String field, String queryText, AnalysisMode analysisMode,
        LuceneFunction luceneFunction) throws ParseException, IOException {
    // make sure the field exists or return a dummy query so we have no
    // error ....ACE-3231
    SchemaField schemaField = schema.getFieldOrNull(field);
    boolean isNumeric = false;
    if (schemaField == null) {
        return new TermQuery(new Term("_dummy_", "_miss_"));
    } else {/*from  www  . j a  v a2s .  c om*/
        isNumeric = (schemaField.getType().getNumericType() != null);
        if (isNumeric) {
            //Check to see if queryText is numeric or else it will fail.
            try {
                Double.valueOf(queryText);
            } catch (NumberFormatException e) {
                return new TermQuery(new Term("_dummy_", "_miss_"));
            }
        }
    }

    // Use the analyzer to get all the tokens, and then build a TermQuery,
    // PhraseQuery, or noth

    // TODO: Untokenised columns with functions require special handling

    if (luceneFunction != LuceneFunction.FIELD) {
        throw new UnsupportedOperationException(
                "Field queries are not supported on lucene functions (UPPER, LOWER, etc)");
    }

    // if the incoming string already has a language identifier we strip it
    // iff and addit back on again

    String localePrefix = "";

    String toTokenise = queryText;

    if (queryText.startsWith("{")) {
        int position = queryText.indexOf("}");
        if (position > 0) {
            String language = queryText.substring(0, position + 1);
            Locale locale = new Locale(queryText.substring(1, position));
            String token = queryText.substring(position + 1);
            boolean found = false;
            for (Locale current : Locale.getAvailableLocales()) {
                if (current.toString().equalsIgnoreCase(locale.toString())) {
                    found = true;
                    break;
                }
            }
            if (found) {
                localePrefix = language;
                toTokenise = token;
            } else {
                // toTokenise = token;
            }
        }
    }

    String testText = toTokenise;
    boolean requiresMLTokenDuplication = false;
    String localeString = null;
    if (isPropertyField(field) && (localePrefix.length() == 0)) {
        if ((queryText.length() > 0) && (queryText.charAt(0) == '\u0000')) {
            int position = queryText.indexOf("\u0000", 1);
            testText = queryText.substring(position + 1);
            requiresMLTokenDuplication = true;
            localeString = queryText.substring(1, position);

        }
    }

    // find the positions of any escaped * and ? and ignore them

    Set<Integer> wildcardPoistions = getWildcardPositions(testText);

    TokenStream source = null;
    ArrayList<PackedTokenAttributeImpl> list = new ArrayList<PackedTokenAttributeImpl>();
    boolean severalTokensAtSamePosition = false;
    PackedTokenAttributeImpl nextToken;
    int positionCount = 0;

    try {
        source = getAnalyzer().tokenStream(field, new StringReader(toTokenise));
        source.reset();
        while (source.incrementToken()) {
            CharTermAttribute cta = source.getAttribute(CharTermAttribute.class);
            OffsetAttribute offsetAtt = source.getAttribute(OffsetAttribute.class);
            TypeAttribute typeAtt = null;
            if (source.hasAttribute(TypeAttribute.class)) {
                typeAtt = source.getAttribute(TypeAttribute.class);
            }
            PositionIncrementAttribute posIncAtt = null;
            if (source.hasAttribute(PositionIncrementAttribute.class)) {
                posIncAtt = source.getAttribute(PositionIncrementAttribute.class);
            }
            nextToken = new PackedTokenAttributeImpl();
            nextToken.setEmpty().copyBuffer(cta.buffer(), 0, cta.length());
            nextToken.setOffset(offsetAtt.startOffset(), offsetAtt.endOffset());
            if (typeAtt != null) {
                nextToken.setType(typeAtt.type());
            }
            if (posIncAtt != null) {
                nextToken.setPositionIncrement(posIncAtt.getPositionIncrement());
            }

            list.add(nextToken);
            if (nextToken.getPositionIncrement() != 0)
                positionCount += nextToken.getPositionIncrement();
            else
                severalTokensAtSamePosition = true;
        }
    } finally {
        try {
            if (source != null) {
                source.close();
            }
        } catch (IOException e) {
            // ignore
        }
    }

    // add any alpha numeric wildcards that have been missed
    // Fixes most stop word and wild card issues

    for (int index = 0; index < testText.length(); index++) {
        char current = testText.charAt(index);
        if (((current == '*') || (current == '?')) && wildcardPoistions.contains(index)) {
            StringBuilder pre = new StringBuilder(10);
            if (index == 0) {
                // "*" and "?" at the start

                boolean found = false;
                for (int j = 0; j < list.size(); j++) {
                    PackedTokenAttributeImpl test = list.get(j);
                    if ((test.startOffset() <= 0) && (0 < test.endOffset())) {
                        found = true;
                        break;
                    }
                }
                if (!found && (list.size() == 0)) {
                    // Add new token followed by * not given by the
                    // tokeniser
                    PackedTokenAttributeImpl newToken = new PackedTokenAttributeImpl();
                    newToken.setEmpty().append("", 0, 0);
                    newToken.setType("ALPHANUM");
                    if (requiresMLTokenDuplication) {
                        Locale locale = I18NUtil.parseLocale(localeString);
                        @SuppressWarnings("resource")
                        MLTokenDuplicator duplicator = new MLTokenDuplicator(locale,
                                MLAnalysisMode.EXACT_LANGUAGE);
                        Iterator<PackedTokenAttributeImpl> it = duplicator.buildIterator(newToken);
                        if (it != null) {
                            int count = 0;
                            while (it.hasNext()) {
                                list.add(it.next());
                                count++;
                                if (count > 1) {
                                    severalTokensAtSamePosition = true;
                                }
                            }
                        }
                    }
                    // content
                    else {
                        list.add(newToken);
                    }
                }
            } else if (index > 0) {
                // Add * and ? back into any tokens from which it has been
                // removed

                boolean tokenFound = false;
                for (int j = 0; j < list.size(); j++) {
                    PackedTokenAttributeImpl test = list.get(j);
                    if ((test.startOffset() <= index) && (index < test.endOffset())) {
                        if (requiresMLTokenDuplication) {
                            String termText = test.toString();
                            int position = termText.indexOf("}");
                            String language = termText.substring(0, position + 1);
                            String token = termText.substring(position + 1);
                            if (index >= test.startOffset() + token.length()) {
                                test.setEmpty();
                                test.append(language + token + current);
                            }
                        } else {
                            if (index >= test.startOffset() + test.length()) {
                                test.setEmpty();
                                test.append(test.toString() + current);
                            }
                        }
                        tokenFound = true;
                        break;
                    }
                }

                if (!tokenFound) {
                    for (int i = index - 1; i >= 0; i--) {
                        char c = testText.charAt(i);
                        if (Character.isLetterOrDigit(c)) {
                            boolean found = false;
                            for (int j = 0; j < list.size(); j++) {
                                PackedTokenAttributeImpl test = list.get(j);
                                if ((test.startOffset() <= i) && (i < test.endOffset())) {
                                    found = true;
                                    break;
                                }
                            }
                            if (found) {
                                break;
                            } else {
                                pre.insert(0, c);
                            }
                        } else {
                            break;
                        }
                    }
                    if (pre.length() > 0) {
                        // Add new token followed by * not given by the
                        // tokeniser
                        PackedTokenAttributeImpl newToken = new PackedTokenAttributeImpl();
                        newToken.setEmpty().append(pre.toString());
                        newToken.setOffset(index - pre.length(), index);
                        newToken.setType("ALPHANUM");
                        if (requiresMLTokenDuplication) {
                            Locale locale = I18NUtil.parseLocale(localeString);
                            @SuppressWarnings("resource")
                            MLTokenDuplicator duplicator = new MLTokenDuplicator(locale,
                                    MLAnalysisMode.EXACT_LANGUAGE);
                            Iterator<PackedTokenAttributeImpl> it = duplicator.buildIterator(newToken);
                            if (it != null) {
                                int count = 0;
                                while (it.hasNext()) {
                                    list.add(it.next());
                                    count++;
                                    if (count > 1) {
                                        severalTokensAtSamePosition = true;
                                    }
                                }
                            }
                        }
                        // content
                        else {
                            list.add(newToken);
                        }
                    }
                }
            }

            StringBuilder post = new StringBuilder(10);
            if (index > 0) {
                for (int i = index + 1; i < testText.length(); i++) {
                    char c = testText.charAt(i);
                    if (Character.isLetterOrDigit(c)) {
                        boolean found = false;
                        for (int j = 0; j < list.size(); j++) {
                            PackedTokenAttributeImpl test = list.get(j);
                            if ((test.startOffset() <= i) && (i < test.endOffset())) {
                                found = true;
                                break;
                            }
                        }
                        if (found) {
                            break;
                        } else {
                            post.append(c);
                        }
                    } else {
                        break;
                    }
                }
                if (post.length() > 0) {
                    // Add new token followed by * not given by the
                    // tokeniser
                    PackedTokenAttributeImpl newToken = new PackedTokenAttributeImpl();
                    newToken.setEmpty().append(post.toString());
                    newToken.setOffset(index + 1, index + 1 + post.length());
                    newToken.setType("ALPHANUM");
                    if (requiresMLTokenDuplication) {
                        Locale locale = I18NUtil.parseLocale(localeString);
                        @SuppressWarnings("resource")
                        MLTokenDuplicator duplicator = new MLTokenDuplicator(locale,
                                MLAnalysisMode.EXACT_LANGUAGE);
                        Iterator<PackedTokenAttributeImpl> it = duplicator.buildIterator(newToken);
                        if (it != null) {
                            int count = 0;
                            while (it.hasNext()) {
                                list.add(it.next());
                                count++;
                                if (count > 1) {
                                    severalTokensAtSamePosition = true;
                                }
                            }
                        }
                    }
                    // content
                    else {
                        list.add(newToken);
                    }
                }
            }

        }
    }

    // Put in real position increments as we treat them correctly

    int curentIncrement = -1;
    for (PackedTokenAttributeImpl c : list) {
        if (curentIncrement == -1) {
            curentIncrement = c.getPositionIncrement();
        } else if (c.getPositionIncrement() > 0) {
            curentIncrement = c.getPositionIncrement();
        } else {
            c.setPositionIncrement(curentIncrement);
        }
    }

    // Fix up position increments for in phrase isolated wildcards

    boolean lastWasWild = false;
    for (int i = 0; i < list.size() - 1; i++) {
        for (int j = list.get(i).endOffset() + 1; j < list.get(i + 1).startOffset() - 1; j++) {
            if (wildcardPoistions.contains(j)) {
                if (!lastWasWild) {
                    list.get(i + 1).setPositionIncrement(list.get(i + 1).getPositionIncrement() + 1);
                }
                lastWasWild = true;
            } else {
                lastWasWild = false;
            }
        }
    }

    Collections.sort(list, new Comparator<PackedTokenAttributeImpl>() {

        public int compare(PackedTokenAttributeImpl o1, PackedTokenAttributeImpl o2) {
            int dif = o1.startOffset() - o2.startOffset();
            return dif;

        }
    });

    // Combined * and ? based strings - should redo the tokeniser

    // Build tokens by position

    LinkedList<LinkedList<PackedTokenAttributeImpl>> tokensByPosition = new LinkedList<LinkedList<PackedTokenAttributeImpl>>();
    LinkedList<PackedTokenAttributeImpl> currentList = null;
    int lastStart = 0;
    for (PackedTokenAttributeImpl c : list) {
        if (c.startOffset() == lastStart) {
            if (currentList == null) {
                currentList = new LinkedList<PackedTokenAttributeImpl>();
                tokensByPosition.add(currentList);
            }
            currentList.add(c);
        } else {
            currentList = new LinkedList<PackedTokenAttributeImpl>();
            tokensByPosition.add(currentList);
            currentList.add(c);
        }
        lastStart = c.startOffset();
    }

    // Build all the token sequences and see which ones get strung together

    OrderedHashSet<LinkedList<PackedTokenAttributeImpl>> allTokenSequencesSet = new OrderedHashSet<LinkedList<PackedTokenAttributeImpl>>();
    for (LinkedList<PackedTokenAttributeImpl> tokensAtPosition : tokensByPosition) {
        OrderedHashSet<LinkedList<PackedTokenAttributeImpl>> positionalSynonymSequencesSet = new OrderedHashSet<LinkedList<PackedTokenAttributeImpl>>();

        OrderedHashSet<LinkedList<PackedTokenAttributeImpl>> newAllTokenSequencesSet = new OrderedHashSet<LinkedList<PackedTokenAttributeImpl>>();

        FOR_FIRST_TOKEN_AT_POSITION_ONLY: for (PackedTokenAttributeImpl t : tokensAtPosition) {
            PackedTokenAttributeImpl replace = new PackedTokenAttributeImpl();
            replace.setEmpty().append(t);
            replace.setOffset(t.startOffset(), t.endOffset());
            replace.setType(t.type());
            replace.setPositionIncrement(t.getPositionIncrement());

            boolean tokenFoundSequence = false;
            for (LinkedList<PackedTokenAttributeImpl> tokenSequence : allTokenSequencesSet) {
                LinkedList<PackedTokenAttributeImpl> newEntry = new LinkedList<PackedTokenAttributeImpl>();
                newEntry.addAll(tokenSequence);
                if ((newEntry.getLast().endOffset() == replace.endOffset())
                        && replace.type().equals(SynonymFilter.TYPE_SYNONYM)) {
                    if ((newEntry.getLast().startOffset() == replace.startOffset())
                            && newEntry.getLast().type().equals(SynonymFilter.TYPE_SYNONYM)) {
                        positionalSynonymSequencesSet.add(tokenSequence);
                        newEntry.add(replace);
                        tokenFoundSequence = true;
                    } else if (newEntry.getLast().type().equals(CommonGramsFilter.GRAM_TYPE)) {
                        if (newEntry.toString().endsWith(replace.toString())) {
                            // already in the gram
                            positionalSynonymSequencesSet.add(tokenSequence);
                            tokenFoundSequence = true;
                        } else {
                            // need to replace the synonym in the current
                            // gram
                            tokenFoundSequence = true;
                            StringBuffer old = new StringBuffer(newEntry.getLast().toString());
                            old.replace(replace.startOffset() - newEntry.getLast().startOffset(),
                                    replace.endOffset() - newEntry.getLast().startOffset(), replace.toString());
                            PackedTokenAttributeImpl newToken = new PackedTokenAttributeImpl();
                            newToken.setEmpty().append(old.toString());
                            newToken.setOffset(newEntry.getLast().startOffset(),
                                    newEntry.getLast().endOffset());
                            newEntry.removeLast();
                            newEntry.add(newToken);
                        }
                    }
                } else if ((newEntry.getLast().startOffset() < replace.startOffset())
                        && (newEntry.getLast().endOffset() < replace.endOffset())) {
                    if (newEntry.getLast().type().equals(SynonymFilter.TYPE_SYNONYM)
                            && replace.type().equals(SynonymFilter.TYPE_SYNONYM)) {
                        positionalSynonymSequencesSet.add(tokenSequence);
                    }
                    newEntry.add(replace);
                    tokenFoundSequence = true;
                }
                newAllTokenSequencesSet.add(newEntry);
            }
            if (false == tokenFoundSequence) {
                for (LinkedList<PackedTokenAttributeImpl> tokenSequence : newAllTokenSequencesSet) {
                    LinkedList<PackedTokenAttributeImpl> newEntry = new LinkedList<PackedTokenAttributeImpl>();
                    newEntry.addAll(tokenSequence);
                    if ((newEntry.getLast().endOffset() == replace.endOffset())
                            && replace.type().equals(SynonymFilter.TYPE_SYNONYM)) {
                        if ((newEntry.getLast().startOffset() == replace.startOffset())
                                && newEntry.getLast().type().equals(SynonymFilter.TYPE_SYNONYM)) {
                            positionalSynonymSequencesSet.add(tokenSequence);
                            newEntry.add(replace);
                            tokenFoundSequence = true;
                        } else if (newEntry.getLast().type().equals(CommonGramsFilter.GRAM_TYPE)) {
                            if (newEntry.toString().endsWith(replace.toString())) {
                                // already in the gram
                                positionalSynonymSequencesSet.add(tokenSequence);
                                tokenFoundSequence = true;
                            } else {
                                // need to replace the synonym in the
                                // current gram
                                tokenFoundSequence = true;
                                StringBuffer old = new StringBuffer(newEntry.getLast().toString());
                                old.replace(replace.startOffset() - newEntry.getLast().startOffset(),
                                        replace.endOffset() - newEntry.getLast().startOffset(),
                                        replace.toString());
                                PackedTokenAttributeImpl newToken = new PackedTokenAttributeImpl();
                                newToken.setEmpty().append(old.toString());
                                newToken.setOffset(newEntry.getLast().startOffset(),
                                        newEntry.getLast().endOffset());
                                newEntry.removeLast();
                                newEntry.add(newToken);
                                positionalSynonymSequencesSet.add(newEntry);
                            }
                        }
                    } else if ((newEntry.getLast().startOffset() < replace.startOffset())
                            && (newEntry.getLast().endOffset() < replace.endOffset())) {
                        if (newEntry.getLast().type().equals(SynonymFilter.TYPE_SYNONYM)
                                && replace.type().equals(SynonymFilter.TYPE_SYNONYM)) {
                            positionalSynonymSequencesSet.add(tokenSequence);
                            newEntry.add(replace);
                            tokenFoundSequence = true;
                        }
                    }
                }
            }
            if (false == tokenFoundSequence) {
                LinkedList<PackedTokenAttributeImpl> newEntry = new LinkedList<PackedTokenAttributeImpl>();
                newEntry.add(replace);
                newAllTokenSequencesSet.add(newEntry);
            }
            // Limit the max number of permutations we consider
            if (newAllTokenSequencesSet.size() > 64) {
                break FOR_FIRST_TOKEN_AT_POSITION_ONLY;
            }
        }
        allTokenSequencesSet = newAllTokenSequencesSet;
        allTokenSequencesSet.addAll(positionalSynonymSequencesSet);

    }

    LinkedList<LinkedList<PackedTokenAttributeImpl>> allTokenSequences = new LinkedList<LinkedList<PackedTokenAttributeImpl>>(
            allTokenSequencesSet);

    // build the unique

    LinkedList<LinkedList<PackedTokenAttributeImpl>> fixedTokenSequences = new LinkedList<LinkedList<PackedTokenAttributeImpl>>();
    for (LinkedList<PackedTokenAttributeImpl> tokenSequence : allTokenSequences) {
        LinkedList<PackedTokenAttributeImpl> fixedTokenSequence = new LinkedList<PackedTokenAttributeImpl>();
        fixedTokenSequences.add(fixedTokenSequence);
        PackedTokenAttributeImpl replace = null;
        for (PackedTokenAttributeImpl c : tokenSequence) {
            if (replace == null) {
                StringBuilder prefix = new StringBuilder();
                for (int i = c.startOffset() - 1; i >= 0; i--) {
                    char test = testText.charAt(i);
                    if (((test == '*') || (test == '?')) && wildcardPoistions.contains(i)) {
                        prefix.insert(0, test);
                    } else {
                        break;
                    }
                }
                String pre = prefix.toString();
                if (requiresMLTokenDuplication) {
                    String termText = c.toString();
                    int position = termText.indexOf("}");
                    String language = termText.substring(0, position + 1);
                    String token = termText.substring(position + 1);
                    replace = new PackedTokenAttributeImpl();
                    replace.setEmpty().append(language + pre + token);
                    replace.setOffset(c.startOffset() - pre.length(), c.endOffset());
                    replace.setType(c.type());
                    replace.setPositionIncrement(c.getPositionIncrement());
                } else {
                    String termText = c.toString();
                    replace = new PackedTokenAttributeImpl();
                    replace.setEmpty().append(pre + termText);
                    replace.setOffset(c.startOffset() - pre.length(), c.endOffset());
                    replace.setType(c.type());
                    replace.setPositionIncrement(c.getPositionIncrement());
                }
            } else {
                StringBuilder prefix = new StringBuilder();
                StringBuilder postfix = new StringBuilder();
                StringBuilder builder = prefix;
                for (int i = c.startOffset() - 1; i >= replace.endOffset(); i--) {
                    char test = testText.charAt(i);
                    if (((test == '*') || (test == '?')) && wildcardPoistions.contains(i)) {
                        builder.insert(0, test);
                    } else {
                        builder = postfix;
                        postfix.setLength(0);
                    }
                }
                String pre = prefix.toString();
                String post = postfix.toString();

                // Does it bridge?
                if ((pre.length() > 0) && (replace.endOffset() + pre.length()) == c.startOffset()) {
                    String termText = c.toString();
                    if (requiresMLTokenDuplication) {
                        int position = termText.indexOf("}");
                        @SuppressWarnings("unused")
                        String language = termText.substring(0, position + 1);
                        String token = termText.substring(position + 1);
                        int oldPositionIncrement = replace.getPositionIncrement();
                        String replaceTermText = replace.toString();
                        replace = new PackedTokenAttributeImpl();
                        replace.setEmpty().append(replaceTermText + pre + token);
                        replace.setOffset(replace.startOffset(), c.endOffset());
                        replace.setType(replace.type());
                        replace.setPositionIncrement(oldPositionIncrement);
                    } else {
                        int oldPositionIncrement = replace.getPositionIncrement();
                        String replaceTermText = replace.toString();
                        replace = new PackedTokenAttributeImpl();
                        replace.setEmpty().append(replaceTermText + pre + termText);
                        replace.setOffset(replace.startOffset(), c.endOffset());
                        replace.setType(replace.type());
                        replace.setPositionIncrement(oldPositionIncrement);
                    }
                } else {
                    String termText = c.toString();
                    if (requiresMLTokenDuplication) {
                        int position = termText.indexOf("}");
                        String language = termText.substring(0, position + 1);
                        String token = termText.substring(position + 1);
                        String replaceTermText = replace.toString();
                        PackedTokenAttributeImpl last = new PackedTokenAttributeImpl();
                        last.setEmpty().append(replaceTermText + post);
                        last.setOffset(replace.startOffset(), replace.endOffset() + post.length());
                        last.setType(replace.type());
                        last.setPositionIncrement(replace.getPositionIncrement());
                        fixedTokenSequence.add(last);
                        replace = new PackedTokenAttributeImpl();
                        replace.setEmpty().append(language + pre + token);
                        replace.setOffset(c.startOffset() - pre.length(), c.endOffset());
                        replace.setType(c.type());
                        replace.setPositionIncrement(c.getPositionIncrement());
                    } else {
                        String replaceTermText = replace.toString();
                        PackedTokenAttributeImpl last = new PackedTokenAttributeImpl();
                        last.setEmpty().append(replaceTermText + post);
                        last.setOffset(replace.startOffset(), replace.endOffset() + post.length());
                        last.setType(replace.type());
                        last.setPositionIncrement(replace.getPositionIncrement());
                        fixedTokenSequence.add(last);
                        replace = new PackedTokenAttributeImpl();
                        replace.setEmpty().append(pre + termText);
                        replace.setOffset(c.startOffset() - pre.length(), c.endOffset());
                        replace.setType(c.type());
                        replace.setPositionIncrement(c.getPositionIncrement());
                    }
                }
            }
        }
        // finish last
        if (replace != null) {
            StringBuilder postfix = new StringBuilder();
            if ((replace.endOffset() >= 0) && (replace.endOffset() < testText.length())) {
                for (int i = replace.endOffset(); i < testText.length(); i++) {
                    char test = testText.charAt(i);
                    if (((test == '*') || (test == '?')) && wildcardPoistions.contains(i)) {
                        postfix.append(test);
                    } else {
                        break;
                    }
                }
            }
            String post = postfix.toString();
            int oldPositionIncrement = replace.getPositionIncrement();
            String replaceTermText = replace.toString();
            PackedTokenAttributeImpl terminal = new PackedTokenAttributeImpl();
            terminal.setEmpty().append(replaceTermText + post);
            terminal.setOffset(replace.startOffset(), replace.endOffset() + post.length());
            terminal.setType(replace.type());
            terminal.setPositionIncrement(oldPositionIncrement);
            fixedTokenSequence.add(terminal);
        }
    }

    // rebuild fixed list

    ArrayList<PackedTokenAttributeImpl> fixed = new ArrayList<PackedTokenAttributeImpl>();
    for (LinkedList<PackedTokenAttributeImpl> tokenSequence : fixedTokenSequences) {
        for (PackedTokenAttributeImpl token : tokenSequence) {
            fixed.add(token);
        }
    }

    // reorder by start position and increment

    Collections.sort(fixed, new Comparator<PackedTokenAttributeImpl>() {

        public int compare(PackedTokenAttributeImpl o1, PackedTokenAttributeImpl o2) {
            int dif = o1.startOffset() - o2.startOffset();
            if (dif != 0) {
                return dif;
            } else {
                return o1.getPositionIncrement() - o2.getPositionIncrement();
            }
        }
    });

    // make sure we remove any tokens we have duplicated

    @SuppressWarnings("rawtypes")
    OrderedHashSet unique = new OrderedHashSet();
    unique.addAll(fixed);
    fixed = new ArrayList<PackedTokenAttributeImpl>(unique);

    list = fixed;

    // add any missing locales back to the tokens

    if (localePrefix.length() > 0) {
        for (int j = 0; j < list.size(); j++) {
            PackedTokenAttributeImpl currentToken = list.get(j);
            String termText = currentToken.toString();
            currentToken.setEmpty();
            currentToken.append(localePrefix + termText);
        }
    }

    SchemaField sf = schema.getField(field);

    boolean isShingled = false;
    @SuppressWarnings("resource")
    TokenizerChain tokenizerChain = (sf.getType().getQueryAnalyzer() instanceof TokenizerChain)
            ? ((TokenizerChain) sf.getType().getQueryAnalyzer())
            : null;
    if (tokenizerChain != null) {
        for (TokenFilterFactory factory : tokenizerChain.getTokenFilterFactories()) {
            if (factory instanceof ShingleFilterFactory) {
                isShingled = true;
                break;
            }
        }
    }
    @SuppressWarnings("resource")
    AlfrescoAnalyzerWrapper analyzerWrapper = (sf.getType()
            .getQueryAnalyzer() instanceof AlfrescoAnalyzerWrapper)
                    ? ((AlfrescoAnalyzerWrapper) sf.getType().getQueryAnalyzer())
                    : null;
    if (analyzerWrapper != null) {
        // assume if there are no term positions it is shingled ....
        isShingled = true;
    }

    boolean forceConjuncion = rerankPhase == RerankPhase.QUERY_PHASE;

    if (list.size() == 0) {
        return null;
    } else if (list.size() == 1) {
        nextToken = list.get(0);
        String termText = nextToken.toString();
        if (!isNumeric && (termText.contains("*") || termText.contains("?"))) {
            return newWildcardQuery(new Term(field, termText));
        } else {
            return newTermQuery(new Term(field, termText));
        }
    } else {
        if (severalTokensAtSamePosition) {
            if (positionCount == 1) {
                // no phrase query:
                Builder q = newBooleanQuery();
                for (int i = 0; i < list.size(); i++) {
                    Query currentQuery;
                    nextToken = list.get(i);
                    String termText = nextToken.toString();
                    if (termText.contains("*") || termText.contains("?")) {
                        currentQuery = newWildcardQuery(new Term(field, termText));
                    } else {
                        currentQuery = newTermQuery(new Term(field, termText));
                    }
                    q.add(currentQuery, BooleanClause.Occur.SHOULD);
                }
                return q.build();
            } else if (forceConjuncion) {
                BooleanQuery.Builder or = new BooleanQuery.Builder();

                for (LinkedList<PackedTokenAttributeImpl> tokenSequence : fixedTokenSequences) {
                    BooleanQuery.Builder and = new BooleanQuery.Builder();
                    for (int i = 0; i < tokenSequence.size(); i++) {
                        nextToken = (PackedTokenAttributeImpl) tokenSequence.get(i);
                        String termText = nextToken.toString();

                        Term term = new Term(field, termText);
                        if ((termText != null) && (termText.contains("*") || termText.contains("?"))) {
                            org.apache.lucene.search.WildcardQuery wildQuery = new org.apache.lucene.search.WildcardQuery(
                                    term);
                            and.add(wildQuery, Occur.MUST);
                        } else {
                            TermQuery termQuery = new TermQuery(term);
                            and.add(termQuery, Occur.MUST);
                        }
                    }
                    if (and.build().clauses().size() > 0) {
                        or.add(and.build(), Occur.SHOULD);
                    }
                }
                return or.build();
            }
            // shingle
            else if (sf.omitPositions() && isShingled) {

                ArrayList<PackedTokenAttributeImpl> nonContained = getNonContained(list);
                Query currentQuery;

                BooleanQuery.Builder weakPhrase = new BooleanQuery.Builder();
                for (PackedTokenAttributeImpl shingleToken : nonContained) {
                    String termText = shingleToken.toString();
                    Term term = new Term(field, termText);

                    if ((termText != null) && (termText.contains("*") || termText.contains("?"))) {
                        currentQuery = new org.apache.lucene.search.WildcardQuery(term);
                    } else {
                        currentQuery = new TermQuery(term);
                    }
                    weakPhrase.add(currentQuery, Occur.MUST);
                }

                return weakPhrase.build();

            }
            // Word delimiter factory and other odd things generate complex
            // token patterns
            // Smart skip token sequences with small tokens that generate
            // toomany wildcards
            // Fall back to the larger pattern
            // e.g Site1* will not do (S ite 1*) or (Site 1*) if 1* matches
            // too much (S ite1*) and (Site1*) will still be OK
            // If we skip all (for just 1* in the input) this is still an
            // issue.
            else {

                return generateSpanOrQuery(field, fixedTokenSequences);

            }
        } else {
            if (forceConjuncion) {
                BooleanQuery.Builder or = new BooleanQuery.Builder();

                for (LinkedList<PackedTokenAttributeImpl> tokenSequence : fixedTokenSequences) {
                    BooleanQuery.Builder and = new BooleanQuery.Builder();
                    for (int i = 0; i < tokenSequence.size(); i++) {
                        nextToken = (PackedTokenAttributeImpl) tokenSequence.get(i);
                        String termText = nextToken.toString();

                        Term term = new Term(field, termText);
                        if ((termText != null) && (termText.contains("*") || termText.contains("?"))) {
                            org.apache.lucene.search.WildcardQuery wildQuery = new org.apache.lucene.search.WildcardQuery(
                                    term);
                            and.add(wildQuery, Occur.MUST);
                        } else {
                            TermQuery termQuery = new TermQuery(term);
                            and.add(termQuery, Occur.MUST);
                        }
                    }
                    if (and.build().clauses().size() > 0) {
                        or.add(and.build(), Occur.SHOULD);
                    }
                }
                return or.build();
            } else {
                SpanQuery spanQuery = null;
                ArrayList<SpanQuery> atSamePositionSpanOrQueryParts = new ArrayList<SpanQuery>();
                int gap = 0;
                for (int i = 0; i < list.size(); i++) {
                    nextToken = list.get(i);
                    String termText = nextToken.toString();
                    Term term = new Term(field, termText);
                    if (getEnablePositionIncrements()) {
                        SpanQuery nextSpanQuery;
                        if ((termText != null) && (termText.contains("*") || termText.contains("?"))) {
                            org.apache.lucene.search.WildcardQuery wildQuery = new org.apache.lucene.search.WildcardQuery(
                                    term);
                            SpanMultiTermQueryWrapper<org.apache.lucene.search.WildcardQuery> wrapper = new SpanMultiTermQueryWrapper<org.apache.lucene.search.WildcardQuery>(
                                    wildQuery);
                            wrapper.setRewriteMethod(
                                    new TopTermsSpanBooleanQueryRewrite(topTermSpanRewriteLimit));
                            nextSpanQuery = wrapper;
                        } else {
                            nextSpanQuery = new SpanTermQuery(term);
                        }
                        if (gap == 0) {
                            atSamePositionSpanOrQueryParts.add(nextSpanQuery);
                        } else {
                            if (atSamePositionSpanOrQueryParts.size() == 0) {
                                if (spanQuery == null) {
                                    spanQuery = nextSpanQuery;
                                } else {
                                    spanQuery = new SpanNearQuery(new SpanQuery[] { spanQuery, nextSpanQuery },
                                            (gap - 1) + internalSlop, internalSlop < 2);
                                }
                                atSamePositionSpanOrQueryParts = new ArrayList<SpanQuery>();
                            } else if (atSamePositionSpanOrQueryParts.size() == 1) {
                                if (spanQuery == null) {
                                    spanQuery = atSamePositionSpanOrQueryParts.get(0);
                                } else {
                                    spanQuery = new SpanNearQuery(
                                            new SpanQuery[] { spanQuery,
                                                    atSamePositionSpanOrQueryParts.get(0) },
                                            (gap - 1) + internalSlop, internalSlop < 2);
                                }
                                atSamePositionSpanOrQueryParts = new ArrayList<SpanQuery>();
                                atSamePositionSpanOrQueryParts.add(nextSpanQuery);
                            } else {
                                if (spanQuery == null) {
                                    spanQuery = new SpanOrQuery(
                                            atSamePositionSpanOrQueryParts.toArray(new SpanQuery[] {}));
                                } else {
                                    spanQuery = new SpanNearQuery(
                                            new SpanQuery[] { spanQuery,
                                                    new SpanOrQuery(atSamePositionSpanOrQueryParts
                                                            .toArray(new SpanQuery[] {})) },
                                            (gap - 1) + internalSlop, internalSlop < 2);
                                }
                                atSamePositionSpanOrQueryParts = new ArrayList<SpanQuery>();
                                atSamePositionSpanOrQueryParts.add(nextSpanQuery);
                            }
                        }
                        gap = nextToken.getPositionIncrement();
                    } else {
                        SpanQuery nextSpanQuery;
                        if ((termText != null) && (termText.contains("*") || termText.contains("?"))) {
                            org.apache.lucene.search.WildcardQuery wildQuery = new org.apache.lucene.search.WildcardQuery(
                                    term);
                            SpanMultiTermQueryWrapper<org.apache.lucene.search.WildcardQuery> wrapper = new SpanMultiTermQueryWrapper<org.apache.lucene.search.WildcardQuery>(
                                    wildQuery);
                            wrapper.setRewriteMethod(
                                    new TopTermsSpanBooleanQueryRewrite(topTermSpanRewriteLimit));
                            nextSpanQuery = wrapper;
                        } else {
                            nextSpanQuery = new SpanTermQuery(term);
                        }
                        if (spanQuery == null) {
                            spanQuery = new SpanOrQuery(nextSpanQuery);
                        } else {
                            spanQuery = new SpanOrQuery(spanQuery, nextSpanQuery);
                        }
                    }
                }
                if (atSamePositionSpanOrQueryParts.size() == 0) {
                    return spanQuery;
                } else if (atSamePositionSpanOrQueryParts.size() == 1) {
                    if (spanQuery == null) {
                        spanQuery = atSamePositionSpanOrQueryParts.get(0);
                    } else {
                        spanQuery = new SpanNearQuery(
                                new SpanQuery[] { spanQuery, atSamePositionSpanOrQueryParts.get(0) },
                                (gap - 1) + internalSlop, internalSlop < 2);
                    }
                    return spanQuery;
                } else {
                    if (spanQuery == null) {
                        spanQuery = new SpanOrQuery(atSamePositionSpanOrQueryParts.toArray(new SpanQuery[] {}));
                    } else {
                        spanQuery = new SpanNearQuery(
                                new SpanQuery[] { spanQuery,
                                        new SpanOrQuery(
                                                atSamePositionSpanOrQueryParts.toArray(new SpanQuery[] {})) },
                                (gap - 1) + internalSlop, internalSlop < 2);
                    }
                    return spanQuery;
                }
            }
        }
    }
}

From source file:org.alfresco.solr.query.Solr4QueryParser.java

License:Open Source License

/**
 * Generates a span query covering the given fixed token sequences for the
 * supplied field.
 *
 * @param field
 *            the index field the generated span queries target
 * @param fixedTokenSequences
 *            LinkedList&lt;LinkedList&lt;PackedTokenAttributeImpl&gt;&gt;
 * @return SpanQuery
 */
protected SpanQuery generateSpanOrQuery(String field,
        LinkedList<LinkedList<PackedTokenAttributeImpl>> fixedTokenSequences) {
    // One SpanQuery per fixed token sequence; they are OR-ed together at the end.
    ArrayList<SpanQuery> spanOrQueryParts = new ArrayList<SpanQuery>();

    for (LinkedList<PackedTokenAttributeImpl> tokenSequence : fixedTokenSequences) {
        int gap = 1;
        SpanQuery spanQuery = null;
        // Tokens sharing a position (position increment 0) are collected here and
        // combined with SpanOrQuery before being chained into a SpanNearQuery.
        ArrayList<SpanQuery> atSamePositionSpanOrQueryParts = new ArrayList<SpanQuery>();

        // MNT-13239: if all tokens' positions are incremented by one then
        // create a flat nearQuery instead of chaining pairwise.
        if (getEnablePositionIncrements() && isAllTokensSequentiallyShifted(tokenSequence)) {
            // there will be no tokens at same position
            List<SpanQuery> wildWrappedList = new ArrayList<SpanQuery>(tokenSequence.size());
            for (PackedTokenAttributeImpl token : tokenSequence) {
                String termText = token.toString();
                Term term = new Term(field, termText);
                wildWrappedList.add(wrapWildcardTerms(term));
            }
            if (wildWrappedList.size() == 1) {
                spanQuery = wildWrappedList.get(0);
            } else {
                // Exact consecutive phrase: slop 0, in order.
                spanQuery = new SpanNearQuery(wildWrappedList.toArray(new SpanQuery[wildWrappedList.size()]), 0,
                        true);
            }
        } else {
            // Iterate rather than index: tokenSequence is a LinkedList, so calling
            // get(i) in a counted loop would make this pass O(n^2).
            for (PackedTokenAttributeImpl nextToken : tokenSequence) {
                String termText = nextToken.toString();
                Term term = new Term(field, termText);

                if (getEnablePositionIncrements()) {
                    SpanQuery nextSpanQuery = wrapWildcardTerms(term);
                    if (gap == 0) {
                        // Same position as the previous token: defer combination.
                        atSamePositionSpanOrQueryParts.add(nextSpanQuery);
                    } else {
                        // New position: flush any deferred same-position tokens,
                        // then (when a group was flushed) start a new group with
                        // the current token.
                        if (atSamePositionSpanOrQueryParts.size() == 0) {
                            if (spanQuery == null) {
                                spanQuery = nextSpanQuery;
                            } else {
                                spanQuery = new SpanNearQuery(new SpanQuery[] { spanQuery, nextSpanQuery },
                                        (gap - 1) + internalSlop, internalSlop < 2);
                            }
                        } else if (atSamePositionSpanOrQueryParts.size() == 1) {
                            if (spanQuery == null) {
                                spanQuery = atSamePositionSpanOrQueryParts.get(0);
                            } else {
                                spanQuery = new SpanNearQuery(
                                        new SpanQuery[] { spanQuery, atSamePositionSpanOrQueryParts.get(0) },
                                        (gap - 1) + internalSlop, internalSlop < 2);
                            }
                            atSamePositionSpanOrQueryParts = new ArrayList<SpanQuery>();
                            atSamePositionSpanOrQueryParts.add(nextSpanQuery);
                        } else {
                            if (spanQuery == null) {
                                spanQuery = new SpanOrQuery(
                                        atSamePositionSpanOrQueryParts.toArray(new SpanQuery[] {}));
                            } else {
                                // NOTE: removed a spurious nested "spanQuery =" assignment
                                // that was here; the inner SpanOrQuery is only an element
                                // of the near query (matches the parallel code below).
                                spanQuery = new SpanNearQuery(
                                        new SpanQuery[] { spanQuery,
                                                new SpanOrQuery(atSamePositionSpanOrQueryParts
                                                        .toArray(new SpanQuery[] {})) },
                                        (gap - 1) + internalSlop, internalSlop < 2);
                            }
                            atSamePositionSpanOrQueryParts = new ArrayList<SpanQuery>();
                            atSamePositionSpanOrQueryParts.add(nextSpanQuery);
                        }
                    }
                    gap = nextToken.getPositionIncrement();

                } else {
                    // Position increments disabled: every token is an alternative,
                    // so simply OR all of them together.
                    SpanQuery nextSpanQuery;
                    if ((termText != null) && (termText.contains("*") || termText.contains("?"))) {
                        org.apache.lucene.search.WildcardQuery wildQuery = new org.apache.lucene.search.WildcardQuery(
                                term);
                        SpanMultiTermQueryWrapper<org.apache.lucene.search.WildcardQuery> wrapper = new SpanMultiTermQueryWrapper<org.apache.lucene.search.WildcardQuery>(
                                wildQuery);
                        // Cap the multi-term rewrite expansion to avoid unbounded
                        // boolean queries on broad wildcards.
                        wrapper.setRewriteMethod(new TopTermsSpanBooleanQueryRewrite(topTermSpanRewriteLimit));
                        nextSpanQuery = wrapper;
                    } else {
                        nextSpanQuery = new SpanTermQuery(term);
                    }
                    if (spanQuery == null) {
                        spanQuery = new SpanOrQuery(nextSpanQuery);
                    } else {
                        spanQuery = new SpanOrQuery(spanQuery, nextSpanQuery);
                    }
                }
            }
        }

        // Flush any trailing same-position tokens left over for this sequence.
        if (atSamePositionSpanOrQueryParts.size() == 1) {
            if (spanQuery == null) {
                spanQuery = atSamePositionSpanOrQueryParts.get(0);
            } else {
                spanQuery = new SpanNearQuery(
                        new SpanQuery[] { spanQuery, atSamePositionSpanOrQueryParts.get(0) },
                        (gap - 1) + internalSlop, internalSlop < 2);
            }
        } else if (atSamePositionSpanOrQueryParts.size() > 1) {
            if (spanQuery == null) {
                spanQuery = new SpanOrQuery(atSamePositionSpanOrQueryParts.toArray(new SpanQuery[] {}));
            } else {
                spanQuery = new SpanNearQuery(
                        new SpanQuery[] { spanQuery,
                                new SpanOrQuery(atSamePositionSpanOrQueryParts.toArray(new SpanQuery[] {})) },
                        (gap - 1) + internalSlop, internalSlop < 2);
            }
        }
        spanOrQueryParts.add(spanQuery);
    }
    if (spanOrQueryParts.size() == 1) {
        return spanOrQueryParts.get(0);
    }
    return new SpanOrQuery(spanOrQueryParts.toArray(new SpanQuery[] {}));
}