Example usage for org.apache.lucene.search WildcardQuery toAutomaton

List of usage examples for org.apache.lucene.search WildcardQuery toAutomaton

Introduction

On this page you can find example usage for org.apache.lucene.search WildcardQuery toAutomaton.

Prototype

@SuppressWarnings("fallthrough")
public static Automaton toAutomaton(Term wildcardquery) 

Source Link

Document

Convert Lucene wildcard syntax into an automaton.

Usage

From source file:org.apache.solr.parser.SolrQueryParserBase.java

License:Apache License

/**
 * Builds a query for a wildcard term in the given field.
 * <p>
 * Special cases handled here:
 * <ul>
 *   <li>A bare {@code *} term (with {@code *} field or no explicit field) becomes a
 *       MatchAllDocsQuery.</li>
 *   <li>Fields configured with a {@code ReversedWildcardFilterFactory} get an
 *       automaton-based query that accounts for reversed-term indexing.</li>
 *   <li>Everything else falls through to {@link #newWildcardQuery}.</li>
 * </ul>
 *
 * @param field   the field to query (must not be null; checked below)
 * @param termStr the raw wildcard pattern as typed by the user
 * @return the constructed query
 * @throws SyntaxError if the term cannot be analyzed
 */
protected Query getWildcardQuery(String field, String termStr) throws SyntaxError {
    checkNullField(field);
    // *:* -> MatchAllDocsQuery
    if ("*".equals(termStr)) {
        if ("*".equals(field) || getExplicitField() == null) {
            return newMatchAllDocsQuery();
        }
    }

    FieldType fieldType = schema.getFieldType(field);
    // Run the multiterm analysis chain so the pattern matches what was indexed.
    termStr = analyzeIfMultitermTermText(field, termStr, fieldType);
    // can we use reversed wildcards in this field?
    ReversedWildcardFilterFactory factory = getReversedWildcardFilterFactory(fieldType);
    if (factory != null) {
        Term term = new Term(field, termStr);
        // fsa representing the query
        Automaton automaton = WildcardQuery.toAutomaton(term);
        // TODO: we should likely use the automaton to calculate shouldReverse, too.
        if (factory.shouldReverse(termStr)) {
            // Append the marker char, then reverse the whole automaton so it
            // matches the reversed form stored in the index.
            // NOTE: SpecialOperations.reverse mutates the automaton in place,
            // so discarding its return value here is intentional.
            automaton = BasicOperations.concatenate(automaton, BasicAutomata.makeChar(factory.getMarkerChar()));
            SpecialOperations.reverse(automaton);
        } else {
            // reverse wildcardfilter is active: remove false positives
            // fsa representing false positives (markerChar*)
            Automaton falsePositives = BasicOperations.concatenate(
                    BasicAutomata.makeChar(factory.getMarkerChar()), BasicAutomata.makeAnyString());
            // subtract these away
            automaton = BasicOperations.minus(automaton, falsePositives);
        }
        return new AutomatonQuery(term, automaton) {
            // override toString so its completely transparent
            @Override
            public String toString(String field) {
                StringBuilder buffer = new StringBuilder();
                if (!getField().equals(field)) {
                    buffer.append(getField());
                    buffer.append(":");
                }
                buffer.append(term.text());
                buffer.append(ToStringUtils.boost(getBoost()));
                return buffer.toString();
            }
        };
    }

    // Solr has always used constant scoring for wildcard queries.  This should return constant scoring by default.
    return newWildcardQuery(new Term(field, termStr));
}

From source file:org.eu.bitzone.Leia.java

License:Apache License

/**
 * Recursively renders the structure of a {@link Query} as a tree of UI nodes
 * attached under {@code parent}, dispatching on the concrete Query subclass.
 * <p>
 * Lucene/Solr class names are abbreviated for display; each recognized query
 * type contributes child nodes describing its terms, clauses, or automaton.
 * Unknown types fall through to a generic toString/extractTerms dump.
 * Reflection is used where Lucene offers no public accessor (automaton,
 * payload functions); failures there are reported as tree nodes, not thrown.
 *
 * @param parent the UI tree node to attach this query's subtree to
 * @param q      the query to explain
 */
private void _explainStructure(final Object parent, final Query q) {
    // Abbreviate well-known package prefixes for display.
    String clazz = q.getClass().getName();
    if (clazz.startsWith("org.apache.lucene.")) {
        clazz = "lucene." + q.getClass().getSimpleName();
    } else if (clazz.startsWith("org.apache.solr.")) {
        clazz = "solr." + q.getClass().getSimpleName();
    }
    final float boost = q.getBoost();
    final Object n = create("node");
    add(parent, n);
    String msg = clazz;
    // Only show boost when it differs from the default of 1.0.
    if (boost != 1.0f) {
        msg += ": boost=" + df.format(boost);
    }
    setFont(n, getFont().deriveFont(Font.BOLD));
    setString(n, "text", msg);
    if (clazz.equals("lucene.TermQuery")) {
        final Object n1 = create("node");
        final Term t = ((TermQuery) q).getTerm();
        setString(n1, "text", "Term: field='" + t.field() + "' text='" + t.text() + "'");
        add(n, n1);
    } else if (clazz.equals("lucene.BooleanQuery")) {
        // Summarize clause count / limits, then recurse into each clause.
        final BooleanQuery bq = (BooleanQuery) q;
        final BooleanClause[] clauses = bq.getClauses();
        final int max = BooleanQuery.getMaxClauseCount();
        Object n1 = create("node");
        String descr = "clauses=" + clauses.length + ", maxClauses=" + max;
        if (bq.isCoordDisabled()) {
            descr += ", coord=false";
        }
        if (bq.getMinimumNumberShouldMatch() > 0) {
            descr += ", minShouldMatch=" + bq.getMinimumNumberShouldMatch();
        }
        setString(n1, "text", descr);
        add(n, n1);
        for (int i = 0; i < clauses.length; i++) {
            n1 = create("node");
            String occur;
            final Occur occ = clauses[i].getOccur();
            if (occ.equals(Occur.MUST)) {
                occur = "MUST";
            } else if (occ.equals(Occur.MUST_NOT)) {
                occur = "MUST_NOT";
            } else if (occ.equals(Occur.SHOULD)) {
                occur = "SHOULD";
            } else {
                occur = occ.toString();
            }
            setString(n1, "text", "Clause " + i + ": " + occur);
            add(n, n1);
            _explainStructure(n1, clauses[i].getQuery());
        }
    } else if (clazz.equals("lucene.PrefixQuery")) {
        Object n1 = create("node");
        final PrefixQuery pq = (PrefixQuery) q;
        final Term t = pq.getPrefix();
        setString(n1, "text", "Prefix: field='" + t.field() + "' text='" + t.text() + "'");
        add(n, n1);
        try {
            // Enumerate the actual index terms the prefix expands to.
            addTermsEnum(n, PrefixQuery.class, pq.getField(), pq);
        } catch (final Exception e) {
            e.printStackTrace();
            n1 = create("node");
            setString(n1, "text", "TermEnum: Exception " + e.getMessage());
            add(n, n1);
        }
    } else if (clazz.equals("lucene.PhraseQuery")) {
        final PhraseQuery pq = (PhraseQuery) q;
        setString(n, "text", getString(n, "text") + ", slop=" + pq.getSlop());
        final int[] pos = pq.getPositions();
        final Term[] terms = pq.getTerms();
        Object n1 = create("node");
        // Render positions as a comma-separated list, e.g. "pos: [0,1,2]".
        final StringBuffer sb = new StringBuffer("pos: [");
        for (int i = 0; i < pos.length; i++) {
            if (i > 0) {
                sb.append(',');
            }
            sb.append("" + pos[i]);
        }
        sb.append("]");
        setString(n1, "text", sb.toString());
        add(n, n1);
        for (int i = 0; i < terms.length; i++) {
            n1 = create("node");
            setString(n1, "text",
                    "Term " + i + ": field='" + terms[i].field() + "' text='" + terms[i].text() + "'");
            add(n, n1);
        }
    } else if (clazz.equals("lucene.MultiPhraseQuery")) {
        final MultiPhraseQuery pq = (MultiPhraseQuery) q;
        setString(n, "text", getString(n, "text") + ", slop=" + pq.getSlop());
        final int[] pos = pq.getPositions();
        Object n1 = create("node");
        final StringBuffer sb = new StringBuffer("pos: [");
        for (int i = 0; i < pos.length; i++) {
            if (i > 0) {
                sb.append(',');
            }
            sb.append("" + pos[i]);
        }
        sb.append("]");
        setString(n1, "text", sb.toString());
        add(n, n1);
        n1 = create("node");
        // No public accessor for term arrays in this Lucene version; fall back to toString.
        System.err.println("MultiPhraseQuery is missing the public getTermArrays() :-(");
        setString(n1, "text", "toString: " + pq.toString());
        add(n, n1);
    } else if (clazz.equals("lucene.FuzzyQuery")) {
        final FuzzyQuery fq = (FuzzyQuery) q;
        Object n1 = create("node");
        setString(n1, "text", "field=" + fq.getField() + ", prefixLen=" + fq.getPrefixLength() + ", maxEdits="
                + df.format(fq.getMaxEdits()));
        add(n, n1);
        try {
            addTermsEnum(n, FuzzyQuery.class, fq.getField(), fq);
        } catch (final Exception e) {
            e.printStackTrace();
            n1 = create("node");
            setString(n1, "text", "TermEnum: Exception " + e.getMessage());
            add(n, n1);
        }
    } else if (clazz.equals("lucene.WildcardQuery")) {
        final WildcardQuery wq = (WildcardQuery) q;
        final Term t = wq.getTerm();
        setString(n, "text", getString(n, "text") + ", term=" + t);
        // Visualize the wildcard pattern as its compiled automaton.
        final Automaton a = WildcardQuery.toAutomaton(t);
        addAutomaton(n, a);
    } else if (clazz.equals("lucene.TermRangeQuery")) {
        final TermRangeQuery rq = (TermRangeQuery) q;
        setString(n, "text", getString(n, "text") + ", inclLower=" + rq.includesLower() + ", inclUpper="
                + rq.includesUpper());
        Object n1 = create("node");
        // NOTE(review): renders as lowerTerm=field:value' — the trailing quote has no
        // opening counterpart; looks like a display-format typo. Confirm intended output.
        setString(n1, "text", "lowerTerm=" + rq.getField() + ":" + rq.getLowerTerm() + "'");
        add(n, n1);
        n1 = create("node");
        setString(n1, "text", "upperTerm=" + rq.getField() + ":" + rq.getUpperTerm() + "'");
        add(n, n1);
        try {
            addTermsEnum(n, TermRangeQuery.class, rq.getField(), rq);
        } catch (final Exception e) {
            e.printStackTrace();
            n1 = create("node");
            setString(n1, "text", "TermEnum: Exception " + e.getMessage());
            add(n, n1);
        }
    } else if (q instanceof AutomatonQuery) {
        final AutomatonQuery aq = (AutomatonQuery) q;
        setString(n, "text", getString(n, "text") + ", " + aq.toString());
        // get automaton via reflection — AutomatonQuery does not expose it publicly.
        try {
            final java.lang.reflect.Field aField = AutomatonQuery.class.getDeclaredField("automaton");
            aField.setAccessible(true);
            final Automaton a = (Automaton) aField.get(aq);
            addAutomaton(n, a);
        } catch (final Exception e) {
            e.printStackTrace();
            final Object n1 = create("node");
            setString(n1, "text", "Automaton: Exception " + e.getMessage());
            add(n, n1);
        }
    } else if (q instanceof MultiTermQuery) {
        final MultiTermQuery mq = (MultiTermQuery) q;
        final Set<Term> terms = new HashSet<Term>();
        mq.extractTerms(terms);
        setString(n, "text", getString(n, "text") + ", terms: " + terms);
        try {
            addTermsEnum(n, TermRangeQuery.class, mq.getField(), mq);
        } catch (final Exception e) {
            e.printStackTrace();
            final Object n1 = create("node");
            setString(n1, "text", "TermEnum: Exception " + e.getMessage());
            add(n, n1);
        }
    } else if (q instanceof ConstantScoreQuery) {
        final ConstantScoreQuery cq = (ConstantScoreQuery) q;
        setString(n, "text", getString(n, "text") + ", " + cq.toString());
        final Object n1 = create("node");
        add(n, n1);
        // A ConstantScoreQuery wraps either a Filter or another Query.
        if (cq.getFilter() != null) {
            setString(n1, "text", "Filter: " + cq.getFilter().toString());
        } else if (cq.getQuery() != null) {
            _explainStructure(n, cq.getQuery());
        }
    } else if (q instanceof FilteredQuery) {
        final FilteredQuery fq = (FilteredQuery) q;
        final Object n1 = create("node");
        setString(n1, "text", "Filter: " + fq.getFilter().toString());
        add(n, n1);
        _explainStructure(n, fq.getQuery());
    } else if (q instanceof SpanQuery) {
        final SpanQuery sq = (SpanQuery) q;
        final Class sqlass = sq.getClass();
        setString(n, "text", getString(n, "text") + ", field=" + sq.getField());
        if (sqlass == SpanOrQuery.class) {
            final SpanOrQuery soq = (SpanOrQuery) sq;
            setString(n, "text", getString(n, "text") + ", " + soq.getClauses().length + " clauses");
            for (final SpanQuery sq1 : soq.getClauses()) {
                _explainStructure(n, sq1);
            }
        } else if (sqlass == SpanFirstQuery.class) {
            final SpanFirstQuery sfq = (SpanFirstQuery) sq;
            setString(n, "text", getString(n, "text") + ", end=" + sfq.getEnd() + ", match:");
            _explainStructure(n, sfq.getMatch());
        } else if (q instanceof SpanNearQuery) { // catch also known subclasses
            final SpanNearQuery snq = (SpanNearQuery) sq;
            setString(n, "text", getString(n, "text") + ", slop=" + snq.getSlop());
            if (snq instanceof PayloadNearQuery) {
                // Reflection: "function" field has no public accessor.
                try {
                    final java.lang.reflect.Field function = PayloadNearQuery.class
                            .getDeclaredField("function");
                    function.setAccessible(true);
                    final Object func = function.get(snq);
                    setString(n, "text", getString(n, "text") + ", func=" + func.getClass().getSimpleName());
                } catch (final Exception e) {
                    e.printStackTrace();
                }
            }
            for (final SpanQuery sq1 : snq.getClauses()) {
                _explainStructure(n, sq1);
            }
        } else if (sqlass == SpanNotQuery.class) {
            final SpanNotQuery snq = (SpanNotQuery) sq;
            Object n1 = create("node");
            add(n, n1);
            setString(n1, "text", "Include:");
            _explainStructure(n1, snq.getInclude());
            n1 = create("node");
            add(n, n1);
            setString(n1, "text", "Exclude:");
            _explainStructure(n1, snq.getExclude());
        } else if (q instanceof SpanTermQuery) {
            final SpanTermQuery stq = (SpanTermQuery) sq;
            setString(n, "text", getString(n, "text") + ", term=" + stq.getTerm());
            if (stq instanceof PayloadTermQuery) {
                try {
                    final java.lang.reflect.Field function = PayloadTermQuery.class
                            .getDeclaredField("function");
                    function.setAccessible(true);
                    final Object func = function.get(stq);
                    setString(n, "text", getString(n, "text") + ", func=" + func.getClass().getSimpleName());
                } catch (final Exception e) {
                    e.printStackTrace();
                }
            }
        } else {
            // Unknown SpanQuery subclass: dump toString plus extracted terms.
            final String defField = getDefaultField(find("srchOptTabs"));
            setString(n, "text", "class=" + q.getClass().getName() + ", " + getString(n, "text") + ", toString="
                    + q.toString(defField));
            final HashSet<Term> terms = new HashSet<Term>();
            sq.extractTerms(terms);
            Object n1 = null;
            // NOTE(review): terms is assigned a fresh HashSet above, so the else
            // ("<no terms matched>") branch below appears unreachable.
            if (terms != null) {
                n1 = create("node");
                setString(n1, "text", "Matched terms (" + terms.size() + "):");
                add(n, n1);
                final Iterator<Term> it = terms.iterator();
                while (it.hasNext()) {
                    final Object n2 = create("node");
                    final Term t = it.next();
                    setString(n2, "text", "field='" + t.field() + "' text='" + t.text() + "'");
                    add(n1, n2);
                }
            } else {
                n1 = create("node");
                setString(n1, "text", "<no terms matched>");
                add(n, n1);
            }
        }
        if (ir != null) {
            final Object n1 = null;
            /*
             * in Lucene 4.0 this requires traversal of sub- and leaf readers, which is cumbersome to do here. try { Spans
             * spans = sq.getSpans(ir); if (spans != null) { n1 = create("node"); int cnt = 0; while (spans.next()) { Object
             * n2 = create("node"); setString(n2, "text", "doc=" + spans.doc() + ", start=" + spans.start() + ", end=" +
             * spans.end()); add(n1, n2); cnt++; } if (cnt > 0) { add(n, n1); setString(n1, "text", "Spans (" + cnt + "):");
             * setBoolean(n1, "expanded", false); } } } catch (Exception e) { e.printStackTrace(); n1 = create("node");
             * setString(n1, "text", "Spans Exception: " + e.getMessage()); add(n, n1); }
             */
        }
    } else {
        // Fallback for any other Query type: class name, toString, and terms.
        Object n1 = create("node");
        final String defField = getDefaultField(find("srchOptTabs"));
        final Set<Term> terms = new HashSet<Term>();
        q.extractTerms(terms);
        setString(n1, "text", q.getClass().getName() + ": " + q.toString(defField));
        add(n, n1);
        if (!terms.isEmpty()) {
            n1 = create("node");
            setString(n1, "text", "terms: " + terms);
            add(n, n1);
        }
    }
}

From source file:org.exist.indexing.lucene.XMLToQuery.java

License:Open Source License

/**
 * Expands a Lucene wildcard pattern into the concrete index terms it matches.
 * <p>
 * The pattern is compiled once into an automaton, which is then intersected
 * with the term dictionary of every leaf reader segment for {@code field}.
 *
 * @param field    the indexed field whose terms are matched
 * @param queryStr the wildcard pattern (e.g. {@code "foo*"})
 * @return all matching terms, possibly empty; never null
 * @throws XPathException if reading the index fails
 */
private Term[] expandTerms(String field, String queryStr) throws XPathException {
    List<Term> termList = new ArrayList<>(8);
    // Compile the wildcard pattern to a DFA up front; CompiledAutomaton lets us
    // intersect it efficiently with each segment's term dictionary.
    Automaton automaton = WildcardQuery.toAutomaton(new Term(field, queryStr));
    CompiledAutomaton compiled = new CompiledAutomaton(automaton);
    IndexReader reader = null;
    try {
        reader = index.getReader();

        for (AtomicReaderContext atomic : reader.leaves()) {
            Terms terms = atomic.reader().terms(field);
            // terms is null when this segment has no postings for the field.
            if (terms != null) {
                TermsEnum termsEnum = compiled.getTermsEnum(terms);
                BytesRef data = termsEnum.next();
                while (data != null) {
                    termList.add(new Term(field, data.utf8ToString()));
                    data = termsEnum.next();
                }
            }
        }
    } catch (IOException e) {
        throw new XPathException("Lucene index error while creating query: " + e.getMessage(), e);
    } finally {
        // Fix: if getReader() itself threw, reader is still null — don't hand
        // null to releaseReader().
        if (reader != null) {
            index.releaseReader(reader);
        }
    }
    return termList.toArray(new Term[termList.size()]);
}

From source file:org.getopt.luke.Luke.java

License:Apache License

/**
 * Recursively renders the structure of a {@link Query} as a tree of UI nodes
 * attached under {@code parent}, dispatching on the concrete Query subclass.
 * <p>
 * Lucene/Solr class names are abbreviated for display; each recognized query
 * type contributes child nodes describing its terms, clauses, or automaton.
 * Unknown types fall through to a generic toString/extractTerms dump.
 * Reflection is used where Lucene offers no public accessor (automaton,
 * payload functions); failures there are reported as tree nodes, not thrown.
 *
 * @param parent the UI tree node to attach this query's subtree to
 * @param q      the query to explain
 */
private void _explainStructure(Object parent, Query q) {
    // Abbreviate well-known package prefixes for display.
    String clazz = q.getClass().getName();
    if (clazz.startsWith("org.apache.lucene.")) {
        clazz = "lucene." + q.getClass().getSimpleName();
    } else if (clazz.startsWith("org.apache.solr.")) {
        clazz = "solr." + q.getClass().getSimpleName();
    }
    float boost = q.getBoost();
    Object n = create("node");
    add(parent, n);
    String msg = clazz;
    // Only show boost when it differs from the default of 1.0.
    if (boost != 1.0f) {
        msg += ": boost=" + df.format(boost);
    }
    setFont(n, getFont().deriveFont(Font.BOLD));
    setString(n, "text", msg);
    if (clazz.equals("lucene.TermQuery")) {
        Object n1 = create("node");
        Term t = ((TermQuery) q).getTerm();
        setString(n1, "text", "Term: field='" + t.field() + "' text='" + t.text() + "'");
        add(n, n1);
    } else if (clazz.equals("lucene.BooleanQuery")) {
        // Summarize clause count / limits, then recurse into each clause.
        BooleanQuery bq = (BooleanQuery) q;
        BooleanClause[] clauses = bq.getClauses();
        int max = bq.getMaxClauseCount();
        Object n1 = create("node");
        String descr = "clauses=" + clauses.length + ", maxClauses=" + max;
        if (bq.isCoordDisabled()) {
            descr += ", coord=false";
        }
        if (bq.getMinimumNumberShouldMatch() > 0) {
            descr += ", minShouldMatch=" + bq.getMinimumNumberShouldMatch();
        }
        setString(n1, "text", descr);
        add(n, n1);
        for (int i = 0; i < clauses.length; i++) {
            n1 = create("node");
            String occur;
            Occur occ = clauses[i].getOccur();
            if (occ.equals(Occur.MUST)) {
                occur = "MUST";
            } else if (occ.equals(Occur.MUST_NOT)) {
                occur = "MUST_NOT";
            } else if (occ.equals(Occur.SHOULD)) {
                occur = "SHOULD";
            } else {
                occur = occ.toString();
            }
            setString(n1, "text", "Clause " + i + ": " + occur);
            add(n, n1);
            _explainStructure(n1, clauses[i].getQuery());
        }
    } else if (clazz.equals("lucene.PrefixQuery")) {
        Object n1 = create("node");
        PrefixQuery pq = (PrefixQuery) q;
        Term t = pq.getPrefix();
        setString(n1, "text", "Prefix: field='" + t.field() + "' text='" + t.text() + "'");
        add(n, n1);
        try {
            // Enumerate the actual index terms the prefix expands to.
            addTermsEnum(n, PrefixQuery.class, pq.getField(), pq);
        } catch (Exception e) {
            e.printStackTrace();
            n1 = create("node");
            setString(n1, "text", "TermEnum: Exception " + e.getMessage());
            add(n, n1);
        }
    } else if (clazz.equals("lucene.PhraseQuery")) {
        PhraseQuery pq = (PhraseQuery) q;
        setString(n, "text", getString(n, "text") + ", slop=" + pq.getSlop());
        int[] pos = pq.getPositions();
        Term[] terms = pq.getTerms();
        Object n1 = create("node");
        // Render positions as a comma-separated list, e.g. "pos: [0,1,2]".
        StringBuffer sb = new StringBuffer("pos: [");
        for (int i = 0; i < pos.length; i++) {
            if (i > 0)
                sb.append(',');
            sb.append("" + pos[i]);
        }
        sb.append("]");
        setString(n1, "text", sb.toString());
        add(n, n1);
        for (int i = 0; i < terms.length; i++) {
            n1 = create("node");
            setString(n1, "text",
                    "Term " + i + ": field='" + terms[i].field() + "' text='" + terms[i].text() + "'");
            add(n, n1);
        }
    } else if (clazz.equals("lucene.MultiPhraseQuery")) {
        MultiPhraseQuery pq = (MultiPhraseQuery) q;
        setString(n, "text", getString(n, "text") + ", slop=" + pq.getSlop());
        int[] pos = pq.getPositions();
        Object n1 = create("node");
        StringBuffer sb = new StringBuffer("pos: [");
        for (int i = 0; i < pos.length; i++) {
            if (i > 0)
                sb.append(',');
            sb.append("" + pos[i]);
        }
        sb.append("]");
        setString(n1, "text", sb.toString());
        add(n, n1);
        n1 = create("node");
        // No public accessor for term arrays in this Lucene version; fall back to toString.
        System.err.println("MultiPhraseQuery is missing the public getTermArrays() :-(");
        setString(n1, "text", "toString: " + pq.toString());
        add(n, n1);
    } else if (clazz.equals("lucene.FuzzyQuery")) {
        FuzzyQuery fq = (FuzzyQuery) q;
        Object n1 = create("node");
        setString(n1, "text", "field=" + fq.getField() + ", prefixLen=" + fq.getPrefixLength() + ", maxEdits="
                + df.format(fq.getMaxEdits()));
        add(n, n1);
        try {
            addTermsEnum(n, FuzzyQuery.class, fq.getField(), fq);
        } catch (Exception e) {
            e.printStackTrace();
            n1 = create("node");
            setString(n1, "text", "TermEnum: Exception " + e.getMessage());
            add(n, n1);
        }
    } else if (clazz.equals("lucene.WildcardQuery")) {
        WildcardQuery wq = (WildcardQuery) q;
        Term t = wq.getTerm();
        setString(n, "text", getString(n, "text") + ", term=" + t);
        // Visualize the wildcard pattern as its compiled automaton.
        Automaton a = WildcardQuery.toAutomaton(t);
        addAutomaton(n, a);
    } else if (clazz.equals("lucene.TermRangeQuery")) {
        TermRangeQuery rq = (TermRangeQuery) q;
        setString(n, "text", getString(n, "text") + ", inclLower=" + rq.includesLower() + ", inclUpper="
                + rq.includesUpper());
        Object n1 = create("node");
        // NOTE(review): renders as lowerTerm=field:value' — the trailing quote has no
        // opening counterpart; looks like a display-format typo. Confirm intended output.
        setString(n1, "text", "lowerTerm=" + rq.getField() + ":" + rq.getLowerTerm() + "'");
        add(n, n1);
        n1 = create("node");
        setString(n1, "text", "upperTerm=" + rq.getField() + ":" + rq.getUpperTerm() + "'");
        add(n, n1);
        try {
            addTermsEnum(n, TermRangeQuery.class, rq.getField(), rq);
        } catch (Exception e) {
            e.printStackTrace();
            n1 = create("node");
            setString(n1, "text", "TermEnum: Exception " + e.getMessage());
            add(n, n1);
        }
    } else if (q instanceof AutomatonQuery) {
        AutomatonQuery aq = (AutomatonQuery) q;
        setString(n, "text", getString(n, "text") + ", " + aq.toString());
        // get automaton via reflection — AutomatonQuery does not expose it publicly.
        try {
            java.lang.reflect.Field aField = AutomatonQuery.class.getDeclaredField("automaton");
            aField.setAccessible(true);
            Automaton a = (Automaton) aField.get(aq);
            addAutomaton(n, a);
        } catch (Exception e) {
            e.printStackTrace();
            Object n1 = create("node");
            setString(n1, "text", "Automaton: Exception " + e.getMessage());
            add(n, n1);
        }
    } else if (q instanceof MultiTermQuery) {
        MultiTermQuery mq = (MultiTermQuery) q;
        Set<Term> terms = new HashSet<Term>();
        mq.extractTerms(terms);
        setString(n, "text", getString(n, "text") + ", terms: " + terms);
        try {
            addTermsEnum(n, TermRangeQuery.class, mq.getField(), mq);
        } catch (Exception e) {
            e.printStackTrace();
            Object n1 = create("node");
            setString(n1, "text", "TermEnum: Exception " + e.getMessage());
            add(n, n1);
        }
    } else if (q instanceof ConstantScoreQuery) {
        ConstantScoreQuery cq = (ConstantScoreQuery) q;
        setString(n, "text", getString(n, "text") + ", " + cq.toString());
        Object n1 = create("node");
        add(n, n1);
        // A ConstantScoreQuery wraps either a Filter or another Query.
        if (cq.getFilter() != null) {
            setString(n1, "text", "Filter: " + cq.getFilter().toString());
        } else if (cq.getQuery() != null) {
            _explainStructure(n, cq.getQuery());
        }
    } else if (q instanceof FilteredQuery) {
        FilteredQuery fq = (FilteredQuery) q;
        Object n1 = create("node");
        setString(n1, "text", "Filter: " + fq.getFilter().toString());
        add(n, n1);
        _explainStructure(n, fq.getQuery());
    } else if (q instanceof SpanQuery) {
        SpanQuery sq = (SpanQuery) q;
        Class sqlass = sq.getClass();
        setString(n, "text", getString(n, "text") + ", field=" + sq.getField());
        if (sqlass == SpanOrQuery.class) {
            SpanOrQuery soq = (SpanOrQuery) sq;
            setString(n, "text", getString(n, "text") + ", " + soq.getClauses().length + " clauses");
            for (SpanQuery sq1 : soq.getClauses()) {
                _explainStructure(n, sq1);
            }
        } else if (sqlass == SpanFirstQuery.class) {
            SpanFirstQuery sfq = (SpanFirstQuery) sq;
            setString(n, "text", getString(n, "text") + ", end=" + sfq.getEnd() + ", match:");
            _explainStructure(n, sfq.getMatch());
        } else if (q instanceof SpanNearQuery) { // catch also known subclasses
            SpanNearQuery snq = (SpanNearQuery) sq;
            setString(n, "text", getString(n, "text") + ", slop=" + snq.getSlop());
            if (snq instanceof PayloadNearQuery) {
                // Reflection: "function" field has no public accessor.
                try {
                    java.lang.reflect.Field function = PayloadNearQuery.class.getDeclaredField("function");
                    function.setAccessible(true);
                    Object func = function.get(snq);
                    setString(n, "text", getString(n, "text") + ", func=" + func.getClass().getSimpleName());
                } catch (Exception e) {
                    e.printStackTrace();
                }
            }
            for (SpanQuery sq1 : snq.getClauses()) {
                _explainStructure(n, sq1);
            }
        } else if (sqlass == SpanNotQuery.class) {
            SpanNotQuery snq = (SpanNotQuery) sq;
            Object n1 = create("node");
            add(n, n1);
            setString(n1, "text", "Include:");
            _explainStructure(n1, snq.getInclude());
            n1 = create("node");
            add(n, n1);
            setString(n1, "text", "Exclude:");
            _explainStructure(n1, snq.getExclude());
        } else if (q instanceof SpanTermQuery) {
            SpanTermQuery stq = (SpanTermQuery) sq;
            setString(n, "text", getString(n, "text") + ", term=" + stq.getTerm());
            if (stq instanceof PayloadTermQuery) {
                try {
                    java.lang.reflect.Field function = PayloadTermQuery.class.getDeclaredField("function");
                    function.setAccessible(true);
                    Object func = function.get(stq);
                    setString(n, "text", getString(n, "text") + ", func=" + func.getClass().getSimpleName());
                } catch (Exception e) {
                    e.printStackTrace();
                }
            }
        } else {
            // Unknown SpanQuery subclass: dump toString plus extracted terms.
            String defField = getDefaultField(find("srchOptTabs"));
            setString(n, "text", "class=" + q.getClass().getName() + ", " + getString(n, "text") + ", toString="
                    + q.toString(defField));
            HashSet<Term> terms = new HashSet<Term>();
            sq.extractTerms(terms);
            Object n1 = null;
            // NOTE(review): terms is assigned a fresh HashSet above, so the else
            // ("<no terms matched>") branch below appears unreachable.
            if (terms != null) {
                n1 = create("node");
                setString(n1, "text", "Matched terms (" + terms.size() + "):");
                add(n, n1);
                Iterator<Term> it = terms.iterator();
                while (it.hasNext()) {
                    Object n2 = create("node");
                    Term t = it.next();
                    setString(n2, "text", "field='" + t.field() + "' text='" + t.text() + "'");
                    add(n1, n2);
                }
            } else {
                n1 = create("node");
                setString(n1, "text", "<no terms matched>");
                add(n, n1);
            }
        }
        if (ir != null) {
            Object n1 = null;
            /* in Lucene 4.0 this requires traversal of sub- and leaf readers,
             * which is cumbersome to do here.
            try {
              Spans spans = sq.getSpans(ir);
              if (spans != null) {
                n1 = create("node");
                int cnt = 0;
                while (spans.next()) {
                  Object n2 = create("node");
                  setString(n2, "text", "doc=" + spans.doc() +
              ", start=" + spans.start() + ", end=" + spans.end());
                  add(n1, n2);
                  cnt++;
                }
                if (cnt > 0) {
                  add(n, n1);
                  setString(n1, "text", "Spans (" + cnt + "):");
                  setBoolean(n1, "expanded", false);
                }
              }
            } catch (Exception e) {
              e.printStackTrace();
              n1 = create("node");
              setString(n1, "text", "Spans Exception: " + e.getMessage());
              add(n, n1);
            }
            */
        }
    } else {
        // Fallback for any other Query type: class name, toString, and terms.
        Object n1 = create("node");
        String defField = getDefaultField(find("srchOptTabs"));
        Set<Term> terms = new HashSet<Term>();
        q.extractTerms(terms);
        setString(n1, "text", q.getClass().getName() + ": " + q.toString(defField));
        add(n, n1);
        if (!terms.isEmpty()) {
            n1 = create("node");
            setString(n1, "text", "terms: " + terms);
            add(n, n1);
        }
    }
}

From source file:org.zenoss.zep.index.impl.lucene.LuceneQueryBuilder.java

License:Open Source License

/**
 * Resolves a (possibly wildcarded) value to the concrete index terms it matches.
 * <p>
 * Values without {@code *} or {@code ?} are returned as a single literal term
 * without touching the index. Otherwise the pattern is compiled to an automaton
 * and intersected with the field's term dictionary.
 *
 * @param fieldName the indexed field to match against
 * @param reader    the index reader to enumerate terms from
 * @param value     the literal or wildcard value
 * @return matching terms, possibly empty; never null
 * @throws ZepException if reading the index fails
 */
private static Term[] getMatchingTerms(String fieldName, IndexReader reader, String value) throws ZepException {
    // Don't search for matches if text doesn't contain wildcards
    if (value.indexOf('*') == -1 && value.indexOf('?') == -1) {
        return new Term[] { new Term(fieldName, value) };
    }

    logger.debug("getMatchingTerms: field={}, value={}", fieldName, value);
    List<Term> matches = new ArrayList<Term>();
    Automaton automaton = WildcardQuery.toAutomaton(new Term(fieldName, value));
    CompiledAutomaton compiled = new CompiledAutomaton(automaton);
    try {
        Terms terms = SlowCompositeReaderWrapper.wrap(reader).terms(fieldName);
        // Fix: terms() is documented to return null when the field has no indexed
        // terms; the original passed that null straight into getTermsEnum (NPE).
        if (terms == null) {
            return new Term[0];
        }
        TermsEnum wildcardTermEnum = compiled.getTermsEnum(terms);
        BytesRef match;
        while (wildcardTermEnum.next() != null) {
            match = wildcardTermEnum.term();
            logger.debug("Match: {}", match);
            matches.add(new Term(fieldName, match.utf8ToString()));
        }
        return matches.toArray(new Term[matches.size()]);
    } catch (IOException e) {
        throw new ZepException(e.getLocalizedMessage(), e);
    }
}