Example usage for org.apache.lucene.index Terms hasPayloads

List of usage examples for org.apache.lucene.index Terms hasPayloads

Introduction

On this page you can find an example usage for org.apache.lucene.index Terms hasPayloads.

Prototype

public abstract boolean hasPayloads();

Source Link

Document

Returns true if documents in this field store payloads.

Usage

From source file:org.apache.blur.lucene.warmup.IndexWarmup.java

License:Apache License

/**
 * Samples the fields of the given reader to produce index-trace results used for
 * cache warmup. If a previously written sample file exists in the segment's
 * directory it is read back instead of re-tracing; otherwise every field is traced
 * and the results are persisted for the next run.
 *
 * @param atomicReader the reader to sample; only {@link SegmentReader} instances
 *                     backed by a {@code TraceableDirectory} are sampled
 * @param context      caller-supplied label used in log messages
 * @return map of segment name to the trace results for that segment; empty when the
 *         reader cannot be sampled, or {@code null} when the index was closed
 *         mid-sampling
 * @throws IOException if reading or writing the sample file fails
 */
public Map<String, List<IndexTracerResult>> sampleIndex(AtomicReader atomicReader, String context)
        throws IOException {
    Map<String, List<IndexTracerResult>> results = new HashMap<String, List<IndexTracerResult>>();
    if (atomicReader instanceof SegmentReader) {
        SegmentReader segmentReader = (SegmentReader) atomicReader;
        Directory directory = segmentReader.directory();
        if (!(directory instanceof TraceableDirectory)) {
            LOG.info("Context [{1}] cannot warmup directory [{0}] needs to be a TraceableDirectory.", directory,
                    context);
            return results;
        }
        IndexTracer tracer = new IndexTracer((TraceableDirectory) directory, _maxSampleSize);
        String fileName = getSampleFileName(segmentReader.getSegmentName());
        List<IndexTracerResult> segmentTraces = new ArrayList<IndexTracerResult>();
        if (directory.fileExists(fileName)) {
            // Reuse the persisted sample; close the input even if read() throws.
            IndexInput input = directory.openInput(fileName, IOContext.READONCE);
            try {
                segmentTraces = read(input);
            } finally {
                input.close();
            }
        } else {
            Fields fields = atomicReader.fields();
            for (String field : fields) {
                LOG.debug("Context [{1}] sampling field [{0}].", field, context);
                Terms terms = fields.terms(field);
                boolean hasOffsets = terms.hasOffsets();
                boolean hasPayloads = terms.hasPayloads();
                boolean hasPositions = terms.hasPositions();

                tracer.initTrace(segmentReader, field, hasPositions, hasPayloads, hasOffsets);
                IndexTracerResult result = tracer.runTrace(terms);
                segmentTraces.add(result);
            }
            if (_isClosed.get()) {
                LOG.info("Context [{0}] index closed", context);
                return null;
            }
            // Persist the traces; close the output even if write() throws so the
            // file handle is not leaked.
            IndexOutput output = directory.createOutput(fileName, IOContext.DEFAULT);
            try {
                write(segmentTraces, output);
            } finally {
                output.close();
            }
        }
        results.put(segmentReader.getSegmentName(), segmentTraces);
    }
    return results;
}

From source file:org.apache.solr.handler.component.AlfrescoSolrHighlighter.java

License:Open Source License

/**
 * Return a {@link org.apache.lucene.search.highlight.QueryScorer} suitable
 * for this Query and field./*from w  w w. j  a v  a 2  s. c  o  m*/
 * 
 * @param query
 *            The current query
 * @param tokenStream
 *            document text CachingTokenStream
 * @param requestFieldname
 *            The name of the field
 * @param request
 *            The SolrQueryRequest
 */
@Override
protected QueryScorer getSpanQueryScorer(Query query, String requestFieldname, TokenStream tokenStream,
        SolrQueryRequest request) {
    String schemaFieldName = AlfrescoSolrDataModel.getInstance().mapProperty(requestFieldname,
            FieldUse.HIGHLIGHT, request);
    QueryScorer scorer = new QueryScorer(query,
            request.getParams().getFieldBool(requestFieldname, HighlightParams.FIELD_MATCH, false)
                    ? schemaFieldName
                    : null);
    scorer.setExpandMultiTermQuery(request.getParams().getBool(HighlightParams.HIGHLIGHT_MULTI_TERM, true));

    boolean defaultPayloads = true;// overwritten below
    try {
        // It'd be nice to know if payloads are on the tokenStream but the
        // presence of the attribute isn't a good
        // indicator.
        final Terms terms = request.getSearcher().getSlowAtomicReader().fields().terms(schemaFieldName);
        if (terms != null) {
            defaultPayloads = terms.hasPayloads();
        }
    } catch (IOException e) {
        log.error("Couldn't check for existence of payloads", e);
    }
    scorer.setUsePayloads(
            request.getParams().getFieldBool(requestFieldname, HighlightParams.PAYLOADS, defaultPayloads));
    return scorer;
}

From source file:org.codelibs.elasticsearch.common.lucene.all.AllTermQuery.java

License:Apache License

/**
 * Rewrites this all-term query based on what the index actually stores:
 * a {@link MatchNoDocsQuery} when the field exists in no segment, a plain
 * {@link TermQuery} when the field stores no payloads (payload-based scoring
 * would be a no-op), and this query unchanged otherwise.
 */
@Override
public Query rewrite(IndexReader reader) throws IOException {
    Query rewritten = super.rewrite(reader);
    if (rewritten != this) {
        return rewritten;
    }
    boolean fieldExists = false;
    boolean hasPayloads = false;
    for (LeafReaderContext context : reader.leaves()) {
        final Terms terms = context.reader().terms(term.field());
        if (terms == null) {
            continue;
        }
        fieldExists = true;
        if (terms.hasPayloads()) {
            // One payload-bearing segment is enough to keep this query.
            hasPayloads = true;
            break;
        }
    }
    if (!fieldExists) {
        return new MatchNoDocsQuery();
    }
    if (!hasPayloads) {
        return new TermQuery(term);
    }
    return this;
}

From source file:org.elasticsearch.action.termvector.TermVectorResponse.java

License:Apache License

/**
 * Writes the per-occurrence token details (position, offsets, payload) of the
 * current term into the builder. Emits nothing when the field stores none of
 * those attributes.
 *
 * @param builder  target content builder
 * @param curTerms terms of the field currently being serialized
 * @param termFreq number of occurrences of the current term
 * @throws IOException if writing to the builder fails
 */
private void buildValues(XContentBuilder builder, Terms curTerms, int termFreq) throws IOException {
    if (!(curTerms.hasPayloads() || curTerms.hasOffsets() || curTerms.hasPositions())) {
        return;
    }

    builder.startArray(FieldStrings.TOKENS);
    for (int i = 0; i < termFreq; i++) {
        builder.startObject();
        if (curTerms.hasPositions()) {
            builder.field(FieldStrings.POS, curentPositions[i]);
        }
        if (curTerms.hasOffsets()) {
            builder.field(FieldStrings.START_OFFSET, currentStartOffset[i]);
            builder.field(FieldStrings.END_OFFSET, currentEndOffset[i]);
        }
        // An occurrence may legitimately carry no payload: initValues stores
        // null in that slot, so guard against NPE before checking the length.
        if (curTerms.hasPayloads() && currentPayloads[i] != null && (currentPayloads[i].length() > 0)) {
            builder.field(FieldStrings.PAYLOAD, currentPayloads[i]);
        }
        builder.endObject();
    }
    builder.endArray();

}

From source file:org.elasticsearch.action.termvector.TermVectorResponse.java

License:Apache License

/**
 * Reads position, offset and payload data for every occurrence of the current
 * term from the positions enum into the reusable per-term buffers.
 *
 * @param curTerms terms of the field currently being processed
 * @param posEnum  positions enum already positioned on the current term/doc
 * @param termFreq number of occurrences to read
 * @throws IOException if advancing the enum fails
 */
private void initValues(Terms curTerms, DocsAndPositionsEnum posEnum, int termFreq) throws IOException {
    final boolean wantPositions = curTerms.hasPositions();
    final boolean wantOffsets = curTerms.hasOffsets();
    final boolean wantPayloads = curTerms.hasPayloads();
    for (int occurrence = 0; occurrence < termFreq; occurrence++) {
        // nextPosition() must be called once per occurrence to advance the
        // enum, even when positions themselves are not stored.
        final int position = posEnum.nextPosition();
        if (wantPositions) {
            curentPositions[occurrence] = position;
        }
        if (wantOffsets) {
            currentStartOffset[occurrence] = posEnum.startOffset();
            currentEndOffset[occurrence] = posEnum.endOffset();
        }
        if (wantPayloads) {
            final BytesRef payload = posEnum.getPayload();
            currentPayloads[occurrence] = (payload == null) ? null
                    : new BytesArray(payload.bytes, 0, payload.length);
        }
    }
}

From source file:org.elasticsearch.action.termvector.TermVectorResponse.java

License:Apache License

/**
 * Pre-sizes the reusable per-term buffers for the attributes this field
 * actually stores, so the serialization loop avoids repeated allocation.
 *
 * @param curTerms terms of the field currently being processed
 * @param termFreq minimum capacity required for the buffers
 */
private void initMemory(Terms curTerms, int termFreq) {
    if (curTerms.hasPayloads()) {
        // Object array: allocated fresh rather than grown.
        currentPayloads = new BytesArray[termFreq];
    }
    if (curTerms.hasOffsets()) {
        currentStartOffset = ArrayUtil.grow(currentStartOffset, termFreq);
        currentEndOffset = ArrayUtil.grow(currentEndOffset, termFreq);
    }
    if (curTerms.hasPositions()) {
        curentPositions = ArrayUtil.grow(curentPositions, termFreq);
    }
}

From source file:org.elasticsearch.action.termvector.TermVectorWriter.java

License:Apache License

/**
 * Serializes the term vectors of the requested fields into this writer's output
 * and attaches the result to the response.
 *
 * @param termVectorsByField per-document term vectors, keyed by field
 * @param selectedFields     fields to write, or {@code null} to write all fields
 * @param flags              which attributes/statistics to include
 * @param topLevelFields     index-wide fields used for term/field statistics
 * @throws IOException if reading the term vectors or writing the output fails
 */
void setFields(Fields termVectorsByField, Set<String> selectedFields, EnumSet<Flag> flags,
        Fields topLevelFields) throws IOException {

    int numFieldsWritten = 0;
    // Enums below are reused across fields/terms to limit allocations.
    TermsEnum iterator = null;
    DocsAndPositionsEnum docsAndPosEnum = null;
    DocsEnum docsEnum = null;
    TermsEnum topLevelIterator = null;
    for (String field : termVectorsByField) {
        // Skip fields the caller did not ask for.
        if ((selectedFields != null) && (!selectedFields.contains(field))) {
            continue;
        }

        Terms fieldTermVector = termVectorsByField.terms(field);
        Terms topLevelTerms = topLevelFields.terms(field);

        topLevelIterator = topLevelTerms.iterator(topLevelIterator);
        // Only emit attributes that were both requested via flags AND are
        // actually stored in the term vector.
        boolean positions = flags.contains(Flag.Positions) && fieldTermVector.hasPositions();
        boolean offsets = flags.contains(Flag.Offsets) && fieldTermVector.hasOffsets();
        boolean payloads = flags.contains(Flag.Payloads) && fieldTermVector.hasPayloads();
        startField(field, fieldTermVector.size(), positions, offsets, payloads);
        if (flags.contains(Flag.FieldStatistics)) {
            writeFieldStatistics(topLevelTerms);
        }
        iterator = fieldTermVector.iterator(iterator);
        final boolean useDocsAndPos = positions || offsets || payloads;
        while (iterator.next() != null) { // iterate all terms of the
            // current field
            // get the doc frequency
            BytesRef term = iterator.term();
            // Every term of this document's vector must exist in the
            // index-wide terms as well.
            boolean foundTerm = topLevelIterator.seekExact(term);
            assert (foundTerm);
            startTerm(term);
            if (flags.contains(Flag.TermStatistics)) {
                writeTermStatistics(topLevelIterator);
            }
            if (useDocsAndPos) {
                // given we have pos or offsets
                docsAndPosEnum = writeTermWithDocsAndPos(iterator, docsAndPosEnum, positions, offsets,
                        payloads);
            } else {
                // if we do not have the positions stored, we need to
                // get the frequency from a DocsEnum.
                docsEnum = writeTermWithDocsOnly(iterator, docsEnum);
            }
        }
        numFieldsWritten++;
    }
    response.setTermVectorField(output);
    response.setHeader(writeHeader(numFieldsWritten, flags.contains(Flag.TermStatistics),
            flags.contains(Flag.FieldStatistics)));
}

From source file:org.elasticsearch.action.termvectors.TermVectorsResponse.java

License:Apache License

/**
 * Writes the per-occurrence token details (position, offsets, payload) of the
 * current term into the builder. Emits nothing when the field stores none of
 * those attributes.
 *
 * @param builder  target content builder
 * @param curTerms terms of the field currently being serialized
 * @param termFreq number of occurrences of the current term
 * @throws IOException if writing to the builder fails
 */
private void buildValues(XContentBuilder builder, Terms curTerms, int termFreq) throws IOException {
    if (!(curTerms.hasPayloads() || curTerms.hasOffsets() || curTerms.hasPositions())) {
        return;
    }

    builder.startArray(FieldStrings.TOKENS);
    for (int i = 0; i < termFreq; i++) {
        builder.startObject();
        if (curTerms.hasPositions()) {
            builder.field(FieldStrings.POS, currentPositions[i]);
        }
        if (curTerms.hasOffsets()) {
            builder.field(FieldStrings.START_OFFSET, currentStartOffset[i]);
            builder.field(FieldStrings.END_OFFSET, currentEndOffset[i]);
        }
        // An occurrence may legitimately carry no payload: initValues stores
        // null in that slot, so guard against NPE before checking the length.
        if (curTerms.hasPayloads() && currentPayloads[i] != null && (currentPayloads[i].length() > 0)) {
            builder.field(FieldStrings.PAYLOAD, currentPayloads[i]);
        }
        builder.endObject();
    }
    builder.endArray();
}

From source file:org.elasticsearch.action.termvectors.TermVectorsResponse.java

License:Apache License

/**
 * Reads position, offset and payload data for every occurrence of the current
 * term from the postings enum into the reusable per-term buffers.
 *
 * @param curTerms terms of the field currently being processed
 * @param posEnum  postings enum already positioned on the current term/doc
 * @param termFreq number of occurrences to read
 * @throws IOException if advancing the enum fails
 */
private void initValues(Terms curTerms, PostingsEnum posEnum, int termFreq) throws IOException {
    final boolean wantPositions = curTerms.hasPositions();
    final boolean wantOffsets = curTerms.hasOffsets();
    final boolean wantPayloads = curTerms.hasPayloads();
    for (int occurrence = 0; occurrence < termFreq; occurrence++) {
        // nextPosition() must be called once per occurrence to advance the
        // enum, even when positions themselves are not stored.
        final int position = posEnum.nextPosition();
        if (wantPositions) {
            currentPositions[occurrence] = position;
        }
        if (wantOffsets) {
            currentStartOffset[occurrence] = posEnum.startOffset();
            currentEndOffset[occurrence] = posEnum.endOffset();
        }
        if (wantPayloads) {
            final BytesRef payload = posEnum.getPayload();
            currentPayloads[occurrence] = (payload == null) ? null
                    : new BytesArray(payload.bytes, 0, payload.length);
        }
    }
}

From source file:org.elasticsearch.action.termvectors.TermVectorsResponse.java

License:Apache License

/**
 * Pre-sizes the reusable per-term buffers for the attributes this field
 * actually stores, so the serialization loop avoids repeated allocation.
 *
 * @param curTerms terms of the field currently being processed
 * @param termFreq minimum capacity required for the buffers
 */
private void initMemory(Terms curTerms, int termFreq) {
    if (curTerms.hasPayloads()) {
        // Object array: allocated fresh rather than grown.
        currentPayloads = new BytesArray[termFreq];
    }
    if (curTerms.hasOffsets()) {
        currentStartOffset = ArrayUtil.grow(currentStartOffset, termFreq);
        currentEndOffset = ArrayUtil.grow(currentEndOffset, termFreq);
    }
    if (curTerms.hasPositions()) {
        currentPositions = ArrayUtil.grow(currentPositions, termFreq);
    }
}