Example usage for org.apache.lucene.queries.function FunctionValues strVal

List of usage examples for org.apache.lucene.queries.function FunctionValues strVal

Introduction

On this page you can find example usage for org.apache.lucene.queries.function FunctionValues strVal.

Prototype

public String strVal(int doc) throws IOException 

Source Link

Usage

From source file:com.mysoft.b2b.solr.B258DynamicSourceParser.java

License:Open Source License

/**
 * Builds per-document integer values derived from a field source plus a set of
 * extra value sources.
 *
 * <p>Fix: removed the {@code System.err.println} debugging statements that were
 * left in this production code path; they ran once per document and polluted
 * stderr. The scoring logic itself is unchanged.
 *
 * @param context       SOLR evaluation context (raw Map per the ValueSource API)
 * @param readerContext leaf reader whose documents will be evaluated
 * @return an {@link IntDocValues} producing 2 for docs with id &gt; 7 when at
 *         least one extra value source exists, otherwise 1
 * @throws IOException if a delegate value source fails to open
 */
@SuppressWarnings("rawtypes")
@Override
public FunctionValues getValues(Map context, AtomicReaderContext readerContext) throws IOException {
    final FunctionValues fieldVals = fieldSource.getValues(context, readerContext);
    final FunctionValues[] valsArr = new FunctionValues[valueSource.length];
    for (int i = 0; i < valueSource.length; i++) {
        valsArr[i] = valueSource[i].getValues(context, readerContext);
    }
    return new IntDocValues(this) {
        @Override
        public int intVal(int doc) {
            for (FunctionValues fv : valsArr) {
                // Evaluate the delegate source; the result was only used for
                // debug printing in the original, the call is kept in case the
                // delegate FunctionValues is stateful.
                fv.intVal(doc);
                // NOTE(review): the hard-coded threshold 7 looks like leftover
                // test logic — confirm the intended condition with the author.
                if (doc > 7) {
                    return 2;
                }
            }
            return 1;
        }

        @Override
        public String toString(int doc) {
            return name() + '(' + fieldVals.strVal(doc) + ')';
        }
    };
}

From source file:com.rzewucki.solr.functions.LengthFunction.java

License:Apache License

/**
 * Returns per-document integer values holding the string length of the wrapped
 * value source's value for each document.
 *
 * @param context SOLR evaluation context object
 * @param readerContext index reader context for the current leaf
 * @return a {@link FunctionValues} yielding the string length, or 0 when the
 *         source is absent or the document has no value
 * @exception IOException if the wrapped source cannot be opened
 * @see FunctionValues
 */
@Override
public FunctionValues getValues(Map context, AtomicReaderContext readerContext) throws IOException {
    final FunctionValues sourceValues;
    if (valueSource == null) {
        sourceValues = null;
    } else {
        sourceValues = valueSource.getValues(context, readerContext);
    }

    return new IntDocValues(this) {
        // Invoked once per document in the result set.
        @Override
        public int intVal(int documentId) {
            if (sourceValues == null) {
                return 0;
            }
            String value = sourceValues.strVal(documentId);
            return value == null ? 0 : value.length();
        }
    };
}

From source file:org.apache.solr.analytics.util.valuesource.ConcatStringFunction.java

License:Apache License

// Concatenates the string value of every source for this document.
// Returns null as soon as any source yields null (null is "absorbing").
@Override
protected String func(int doc, FunctionValues[] valsArr) {
    StringBuilder result = new StringBuilder();
    for (FunctionValues source : valsArr) {
        String piece = source.strVal(doc);
        if (piece == null) {
            return null;
        }
        result.append(piece);
    }
    return result.toString();
}

From source file:org.apache.solr.analytics.util.valuesource.FilterFieldSource.java

License:Apache License

/**
 * Wraps the underlying source's values, delegating every typed accessor and
 * reporting a document as non-existent when its value is null or equals the
 * configured {@code missValue} sentinel.
 */
@Override
public FunctionValues getValues(Map context, LeafReaderContext readerContext) throws IOException {
    final FunctionValues delegate = source.getValues(context, readerContext);
    return new FunctionValues() {

        @Override
        public byte byteVal(int doc) {
            return delegate.byteVal(doc);
        }

        @Override
        public short shortVal(int doc) {
            return delegate.shortVal(doc);
        }

        @Override
        public float floatVal(int doc) {
            return delegate.floatVal(doc);
        }

        @Override
        public int intVal(int doc) {
            return delegate.intVal(doc);
        }

        @Override
        public long longVal(int doc) {
            return delegate.longVal(doc);
        }

        @Override
        public double doubleVal(int doc) {
            return delegate.doubleVal(doc);
        }

        @Override
        public String strVal(int doc) {
            return delegate.strVal(doc);
        }

        // Filtered: missing documents surface as null.
        @Override
        public Object objectVal(int doc) {
            if (exists(doc)) {
                return delegate.objectVal(doc);
            }
            return null;
        }

        // A value "exists" only when present and not equal to the sentinel.
        @Override
        public boolean exists(int doc) {
            Object raw = delegate.objectVal(doc);
            if (raw == null) {
                return false;
            }
            return !missValue.equals(raw);
        }

        @Override
        public String toString(int doc) {
            return NAME + '(' + delegate.toString(doc) + ')';
        }

        @Override
        public ValueFiller getValueFiller() {
            return new ValueFiller() {
                private final ValueFiller innerFiller = delegate.getValueFiller();
                private final MutableValue value = innerFiller.getValue();

                @Override
                public MutableValue getValue() {
                    return value;
                }

                @Override
                public void fillValue(int doc) {
                    innerFiller.fillValue(doc);
                    // Override the exists flag with the filtered semantics.
                    value.exists = exists(doc);
                }
            };
        }
    };
}

From source file:org.apache.solr.analytics.util.valuesource.ReverseStringFunction.java

License:Apache License

// Reverses the document's string value; null propagates unchanged.
protected CharSequence func(int doc, FunctionValues vals) {
    String value = vals.strVal(doc);
    if (value == null) {
        return null;
    }
    return StringUtils.reverse(value);
}

From source file:org.apache.solr.request.NumericFacets.java

License:Apache License

/**
 * Computes facet counts for a numeric (trie) field over the given doc set
 * without using the field cache's term enumeration, by hashing each doc's
 * numeric value and then resolving the top entries back to readable strings.
 *
 * @param searcher  index searcher supplying readers and schema
 * @param fieldName numeric field to facet on (must have a NumericType)
 * @param offset    number of leading facet entries to skip
 * @param limit     maximum entries to return; negative means unlimited
 * @param mincount  minimum count a value needs to be returned; &lt;= 0 also
 *                  merges zero-count terms from the terms dictionary
 * @param missing   if true, append a null-keyed entry with the missing count
 * @param sort      facet sort mode (count or index)
 * @throws IllegalStateException if the field is not numeric, or not indexed
 *                  when zero-count merging is required
 */
public static NamedList<Integer> getCounts(SolrIndexSearcher searcher, DocSet docs, String fieldName,
        int offset, int limit, int mincount, boolean missing, String sort) throws IOException {
    // zeros: caller asked for zero-count values too; internally we still
    // count with mincount >= 1 and merge zeros from the terms dict later.
    final boolean zeros = mincount <= 0;
    mincount = Math.max(mincount, 1);
    final SchemaField sf = searcher.getSchema().getField(fieldName);
    final FieldType ft = sf.getType();
    final NumericType numericType = ft.getNumericType();
    if (numericType == null) {
        throw new IllegalStateException();
    }
    final List<AtomicReaderContext> leaves = searcher.getIndexReader().leaves();

    // 1. accumulate: walk the doc set in order, lazily advancing to the leaf
    // that contains each doc and hashing its numeric value (as a sortable
    // long) into an open-addressing hash table of (bits, count, sample doc).
    final HashTable hashTable = new HashTable();
    final Iterator<AtomicReaderContext> ctxIt = leaves.iterator();
    AtomicReaderContext ctx = null;
    FieldCache.Longs longs = null;
    Bits docsWithField = null;
    int missingCount = 0;
    for (DocIterator docsIt = docs.iterator(); docsIt.hasNext();) {
        final int doc = docsIt.nextDoc();
        if (ctx == null || doc >= ctx.docBase + ctx.reader().maxDoc()) {
            // Advance to the leaf containing this doc; doc IDs are ascending,
            // so leaves are consumed strictly forward.
            do {
                ctx = ctxIt.next();
            } while (ctx == null || doc >= ctx.docBase + ctx.reader().maxDoc());
            assert doc >= ctx.docBase;
            // Adapt each numeric type to a common sortable-long view so one
            // hash table handles all of them.
            switch (numericType) {
            case LONG:
                longs = FieldCache.DEFAULT.getLongs(ctx.reader(), fieldName, true);
                break;
            case INT:
                final FieldCache.Ints ints = FieldCache.DEFAULT.getInts(ctx.reader(), fieldName, true);
                longs = new FieldCache.Longs() {
                    @Override
                    public long get(int docID) {
                        return ints.get(docID);
                    }
                };
                break;
            case FLOAT:
                final FieldCache.Floats floats = FieldCache.DEFAULT.getFloats(ctx.reader(), fieldName, true);
                longs = new FieldCache.Longs() {
                    @Override
                    public long get(int docID) {
                        return NumericUtils.floatToSortableInt(floats.get(docID));
                    }
                };
                break;
            case DOUBLE:
                final FieldCache.Doubles doubles = FieldCache.DEFAULT.getDoubles(ctx.reader(), fieldName, true);
                longs = new FieldCache.Longs() {
                    @Override
                    public long get(int docID) {
                        return NumericUtils.doubleToSortableLong(doubles.get(docID));
                    }
                };
                break;
            default:
                throw new AssertionError();
            }
            docsWithField = FieldCache.DEFAULT.getDocsWithField(ctx.reader(), fieldName);
        }
        long v = longs.get(doc - ctx.docBase);
        // A zero value is ambiguous with "no value"; disambiguate via the
        // docsWithField bits before counting it as missing.
        if (v != 0 || docsWithField.get(doc - ctx.docBase)) {
            hashTable.add(doc, v, 1);
        } else {
            ++missingCount;
        }
    }

    // 2. select top-k facet values with a bounded priority queue (k covers
    // offset + limit so we can later discard the offset prefix).
    final int pqSize = limit < 0 ? hashTable.size : Math.min(offset + limit, hashTable.size);
    final PriorityQueue<Entry> pq;
    if (FacetParams.FACET_SORT_COUNT.equals(sort) || FacetParams.FACET_SORT_COUNT_LEGACY.equals(sort)) {
        // Sort by count desc, ties broken by value (bits) asc.
        pq = new PriorityQueue<Entry>(pqSize) {
            @Override
            protected boolean lessThan(Entry a, Entry b) {
                if (a.count < b.count || (a.count == b.count && a.bits > b.bits)) {
                    return true;
                } else {
                    return false;
                }
            }
        };
    } else {
        // Index order: sort by value (bits) asc.
        pq = new PriorityQueue<Entry>(pqSize) {
            @Override
            protected boolean lessThan(Entry a, Entry b) {
                return a.bits > b.bits;
            }
        };
    }
    // Reuse one Entry instance; insertWithOverflow hands back an evicted (or
    // rejected) entry we can recycle for the next candidate.
    Entry e = null;
    for (int i = 0; i < hashTable.bits.length; ++i) {
        if (hashTable.counts[i] >= mincount) {
            if (e == null) {
                e = new Entry();
            }
            e.bits = hashTable.bits[i];
            e.count = hashTable.counts[i];
            e.docID = hashTable.docIDs[i];
            e = pq.insertWithOverflow(e);
        }
    }

    // 3. build the NamedList
    final ValueSource vs = ft.getValueSource(sf, null);
    final NamedList<Integer> result = new NamedList<Integer>();

    // This stuff is complicated because if facet.mincount=0, the counts need
    // to be merged with terms from the terms dict.
    if (!zeros || FacetParams.FACET_SORT_COUNT.equals(sort)
            || FacetParams.FACET_SORT_COUNT_LEGACY.equals(sort)) {
        // Only keep items we're interested in: pop the PQ (smallest first)
        // until only the first `offset` entries remain, collecting in order.
        final Deque<Entry> counts = new ArrayDeque<Entry>();
        while (pq.size() > offset) {
            counts.addFirst(pq.pop());
        }

        // Entries from the PQ first, then using the terms dictionary.
        // Each value is resolved to its readable string via the sample docID
        // recorded during accumulation.
        for (Entry entry : counts) {
            final int readerIdx = ReaderUtil.subIndex(entry.docID, leaves);
            final FunctionValues values = vs.getValues(Collections.emptyMap(), leaves.get(readerIdx));
            result.add(values.strVal(entry.docID - leaves.get(readerIdx).docBase), entry.count);
        }

        if (zeros && (limit < 0 || result.size() < limit)) { // need to merge with the term dict
            if (!sf.indexed()) {
                throw new IllegalStateException("Cannot use " + FacetParams.FACET_MINCOUNT + "=0 on field "
                        + sf.getName() + " which is not indexed");
            }
            // Add zeros until there are limit results. First record every
            // value already emitted (including the offset prefix still in the
            // PQ) so the terms-dict pass does not duplicate them.
            final Set<String> alreadySeen = new HashSet<String>();
            while (pq.size() > 0) {
                Entry entry = pq.pop();
                final int readerIdx = ReaderUtil.subIndex(entry.docID, leaves);
                final FunctionValues values = vs.getValues(Collections.emptyMap(), leaves.get(readerIdx));
                alreadySeen.add(values.strVal(entry.docID - leaves.get(readerIdx).docBase));
            }
            for (int i = 0; i < result.size(); ++i) {
                alreadySeen.add(result.getName(i));
            }
            final Terms terms = searcher.getAtomicReader().terms(fieldName);
            if (terms != null) {
                // Trie fields index multiple precisions; restrict the scan to
                // the main (full-precision) value prefix.
                final String prefixStr = TrieField.getMainValuePrefix(ft);
                final BytesRef prefix;
                if (prefixStr != null) {
                    prefix = new BytesRef(prefixStr);
                } else {
                    prefix = new BytesRef();
                }
                final TermsEnum termsEnum = terms.iterator(null);
                BytesRef term;
                switch (termsEnum.seekCeil(prefix)) {
                case FOUND:
                case NOT_FOUND:
                    term = termsEnum.term();
                    break;
                case END:
                    term = null;
                    break;
                default:
                    throw new AssertionError();
                }
                final CharsRef spare = new CharsRef();
                // Skip terms covering the offset; `skipped` starts at the
                // number of entries already accounted for by the hash table.
                for (int skipped = hashTable.size; skipped < offset && term != null
                        && StringHelper.startsWith(term, prefix);) {
                    ft.indexedToReadable(term, spare);
                    final String termStr = spare.toString();
                    if (!alreadySeen.contains(termStr)) {
                        ++skipped;
                    }
                    term = termsEnum.next();
                }
                // Emit unseen terms with a zero count until the limit is hit.
                for (; term != null && StringHelper.startsWith(term, prefix)
                        && (limit < 0 || result.size() < limit); term = termsEnum.next()) {
                    ft.indexedToReadable(term, spare);
                    final String termStr = spare.toString();
                    if (!alreadySeen.contains(termStr)) {
                        result.add(termStr, 0);
                    }
                }
            }
        }
    } else {
        // sort=index, mincount=0 and we have less than limit items
        // => Merge the PQ and the terms dictionary on the fly
        if (!sf.indexed()) {
            throw new IllegalStateException("Cannot use " + FacetParams.FACET_SORT + "="
                    + FacetParams.FACET_SORT_INDEX + " on a field which is not indexed");
        }
        // Drain the PQ into a lookup map; the terms-dict walk below supplies
        // index order, so PQ ordering no longer matters.
        final Map<String, Integer> counts = new HashMap<String, Integer>();
        while (pq.size() > 0) {
            final Entry entry = pq.pop();
            final int readerIdx = ReaderUtil.subIndex(entry.docID, leaves);
            final FunctionValues values = vs.getValues(Collections.emptyMap(), leaves.get(readerIdx));
            counts.put(values.strVal(entry.docID - leaves.get(readerIdx).docBase), entry.count);
        }
        final Terms terms = searcher.getAtomicReader().terms(fieldName);
        if (terms != null) {
            final String prefixStr = TrieField.getMainValuePrefix(ft);
            final BytesRef prefix;
            if (prefixStr != null) {
                prefix = new BytesRef(prefixStr);
            } else {
                prefix = new BytesRef();
            }
            final TermsEnum termsEnum = terms.iterator(null);
            BytesRef term;
            switch (termsEnum.seekCeil(prefix)) {
            case FOUND:
            case NOT_FOUND:
                term = termsEnum.term();
                break;
            case END:
                term = null;
                break;
            default:
                throw new AssertionError();
            }
            final CharsRef spare = new CharsRef();
            // Skip the offset prefix in index order.
            for (int i = 0; i < offset && term != null && StringHelper.startsWith(term, prefix); ++i) {
                term = termsEnum.next();
            }
            // Emit every term (with its hashed count, or 0) up to the limit.
            for (; term != null && StringHelper.startsWith(term, prefix)
                    && (limit < 0 || result.size() < limit); term = termsEnum.next()) {
                ft.indexedToReadable(term, spare);
                final String termStr = spare.toString();
                Integer count = counts.get(termStr);
                if (count == null) {
                    count = 0;
                }
                result.add(termStr, count);
            }
        }
    }

    if (missing) {
        result.add(null, missingCount);
    }
    return result;
}

From source file:org.apache.solr.search.function.ConcatStringFunction.java

License:Apache License

// Joins the string value of each source in order for the given document.
// Any null contribution short-circuits the whole result to null.
@Override
protected String func(int doc, FunctionValues[] valsArr) throws IOException {
    StringBuilder joined = new StringBuilder();
    for (FunctionValues fv : valsArr) {
        String part = fv.strVal(doc);
        if (part == null) {
            return null;
        }
        joined.append(part);
    }
    return joined.toString();
}

From source file:org.apache.solr.search.function.distance.GeohashHaversineFunction.java

License:Apache License

/**
 * Computes the haversine distance between two geohash-encoded points for the
 * given document.
 *
 * <p>Fix: replaced the unidiomatic {@code == false} comparison with {@code !}.
 *
 * @param doc   document id within the current leaf
 * @param gh1DV geohash string source for the first point
 * @param gh2DV geohash string source for the second point
 * @return 0 when the hashes are equal; {@link Double#MAX_VALUE} when either
 *         hash is null; otherwise the decoded distance scaled by
 *         {@code degreesToDist}
 */
protected double distance(int doc, FunctionValues gh1DV, FunctionValues gh2DV) {
    double result = 0;
    String h1 = gh1DV.strVal(doc);
    String h2 = gh2DV.strVal(doc);
    if (h1 != null && h2 != null && !h1.equals(h2)) {
        //TODO: If one of the hashes is a literal value source, seems like we could cache it
        //and avoid decoding every time
        Point p1 = GeohashUtils.decode(h1, ctx);
        Point p2 = GeohashUtils.decode(h2, ctx);
        result = ctx.getDistCalc().distance(p1, p2) * degreesToDist;
    } else if (h1 == null || h2 == null) {
        // Treat a missing hash as "infinitely far" rather than distance 0.
        result = Double.MAX_VALUE;
    }
    return result;
}

From source file:org.apache.solr.search.function.distance.StringDistanceFunction.java

License:Apache License

/**
 * Produces per-document float values measuring the configured string distance
 * between the two wrapped value sources.
 */
@Override
public FunctionValues getValues(Map context, AtomicReaderContext readerContext) throws IOException {
    final FunctionValues firstVals = str1.getValues(context, readerContext);
    final FunctionValues secondVals = str2.getValues(context, readerContext);
    return new FloatDocValues(this) {

        @Override
        public float floatVal(int doc) {
            String a = firstVals.strVal(doc);
            String b = secondVals.strVal(doc);
            return dist.getDistance(a, b);
        }

        @Override
        public String toString(int doc) {
            StringBuilder sb = new StringBuilder();
            sb.append("strdist").append('(');
            sb.append(firstVals.toString(doc)).append(',').append(secondVals.toString(doc)).append(", dist=")
                    .append(dist.getClass().getName());
            sb.append(')');
            return sb.toString();
        }
    };
}

From source file:org.apache.solr.search.TestIndexSearcher.java

License:Apache License

// Resolves the readable string value of `field` for a global doc id by
// locating the owning leaf reader and translating to a leaf-local id.
private String getStringVal(SolrQueryRequest sqr, String field, int doc) throws IOException {
    SchemaField schemaField = sqr.getSchema().getField(field);
    ValueSource valueSource = schemaField.getType().getValueSource(schemaField, null);
    Map fContext = ValueSource.newContext(sqr.getSearcher());
    valueSource.createWeight(fContext, sqr.getSearcher());

    IndexReaderContext topContext = sqr.getSearcher().getTopReaderContext();
    List<AtomicReaderContext> leaves = topContext.leaves();
    AtomicReaderContext leaf = leaves.get(ReaderUtil.subIndex(doc, leaves));

    FunctionValues functionValues = valueSource.getValues(fContext, leaf);
    return functionValues.strVal(doc - leaf.docBase);
}