List of usage examples for org.apache.lucene.index LeafReader postings
public final PostingsEnum postings(Term term, int flags) throws IOException
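Returns a PostingsEnum for the given term, requesting only the features named by flags (PostingsEnum.NONE, FREQS, POSITIONS, OFFSETS, PAYLOADS, or ALL), or null if the term does not exist in this leaf. Before the project examples, here is a minimal sketch of the common call pattern; the class and helper names are illustrative, not taken from the sources below:

import java.io.IOException;

import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.DocIdSetIterator;

public class PostingsUsageSketch {

    // Counts the documents in this leaf that contain the given term.
    static long countDocs(LeafReader reader, Term term) throws IOException {
        // Ask only for doc IDs; skip freqs, positions, offsets and payloads.
        PostingsEnum postings = reader.postings(term, PostingsEnum.NONE);
        if (postings == null) {
            return 0; // postings(...) returns null when the term is absent
        }
        long count = 0;
        while (postings.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
            count++;
        }
        return count;
    }
}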
From source file: com.shaie.annots.AnnotationsUtils.java
License: Apache License
public static void printAnnotations(LeafReader reader, Term term) throws IOException {
    System.out.println("Annotations for " + term);
    final ByteArrayDataInput in = new ByteArrayDataInput();
    // Request payloads in addition to doc IDs, freqs and positions.
    final PostingsEnum postings = reader.postings(term, PostingsEnum.PAYLOADS);
    for (int docID = postings.nextDoc(); docID != DocIdSetIterator.NO_MORE_DOCS; docID = postings.nextDoc()) {
        final int freq = postings.freq();
        System.out.println(" doc=" + docID + ", freq=" + freq);
        for (int i = 0; i < freq; i++) {
            postings.nextPosition();
            final BytesRef payload = postings.getPayload();
            // Each payload holds two VInts: the annotation's start and length.
            in.reset(payload.bytes, payload.offset, payload.length);
            System.out.println(" start=" + in.readVInt() + ", length=" + in.readVInt());
        }
    }
}
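Usage note: this helper assumes the term is present; reader.postings(...) returns null for a term that does not occur in the segment, so a defensive version would add a null check (compare the Elasticsearch example below). It also assumes each payload was encoded as two VInts at index time.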
From source file: org.elasticsearch.index.percolator.PercolatorQueryCache.java
License: Apache License
QueriesLeaf loadQueries(LeafReaderContext context, IndexShard indexShard) throws IOException {
    Version indexVersionCreated = indexShard.indexSettings().getIndexVersionCreated();
    MapperService mapperService = indexShard.mapperService();
    LeafReader leafReader = context.reader();
    ShardId shardId = ShardUtils.extractShardId(leafReader);
    if (shardId == null) {
        throw new IllegalStateException("can't resolve shard id");
    }
    if (indexSettings.getIndex().equals(shardId.getIndex()) == false) {
        // percolator cache insanity
        String message = "Trying to load queries for index " + shardId.getIndex() + " with cache of index "
                + indexSettings.getIndex();
        throw new IllegalStateException(message);
    }
    IntObjectHashMap<Query> queries = new IntObjectHashMap<>();
    boolean legacyLoading = indexVersionCreated.before(Version.V_5_0_0_alpha1);
    if (legacyLoading) {
        PostingsEnum postings = leafReader.postings(new Term(TypeFieldMapper.NAME, LEGACY_TYPE_NAME),
                PostingsEnum.NONE);
        if (postings != null) {
            LegacyQueryFieldVisitor visitor = new LegacyQueryFieldVisitor();
            for (int docId = postings.nextDoc(); docId != DocIdSetIterator.NO_MORE_DOCS; docId = postings.nextDoc()) {
                leafReader.document(docId, visitor);
                queries.put(docId, parseLegacyPercolatorDocument(docId, visitor.source));
                visitor.source = null; // reset
            }
        }
    } else {
        // Each type can have one percolator field mapper,
        // So for each type we check if there is a percolator field mapper
        // and parse all the queries for the documents of that type.
        IndexSearcher indexSearcher = new IndexSearcher(leafReader);
        for (DocumentMapper documentMapper : mapperService.docMappers(false)) {
            Weight queryWeight = indexSearcher.createNormalizedWeight(documentMapper.typeFilter(), false);
            for (FieldMapper fieldMapper : documentMapper.mappers()) {
                if (fieldMapper instanceof PercolatorFieldMapper) {
                    PercolatorFieldType fieldType = (PercolatorFieldType) fieldMapper.fieldType();
                    BinaryDocValues binaryDocValues = leafReader
                            .getBinaryDocValues(fieldType.getQueryBuilderFieldName());
                    if (binaryDocValues != null) {
                        // use the same leaf reader context the indexSearcher is using too:
                        Scorer scorer = queryWeight.scorer(leafReader.getContext());
                        if (scorer != null) {
                            DocIdSetIterator iterator = scorer.iterator();
                            for (int docId = iterator.nextDoc(); docId != DocIdSetIterator.NO_MORE_DOCS; docId = iterator.nextDoc()) {
                                BytesRef qbSource = binaryDocValues.get(docId);
                                if (qbSource.length > 0) {
                                    queries.put(docId, parseQueryBuilder(docId, qbSource));
                                }
                            }
                        }
                    }
                    break;
                }
            }
        }
    }
    leafReader.addCoreClosedListener(this);
    return new QueriesLeaf(shardId, queries);
}
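Usage note: the legacy branch requests PostingsEnum.NONE because the postings are used purely as an iterator over the doc IDs of legacy-type documents, and it checks the return value for null, since the type term may be absent from the segment.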
From source file: org.tallison.lucene.search.concordance.TestSimpleAnalyzerUtil.java
License: Apache License
private void executeNeedleTests(Analyzer analyzer) throws Exception {
    String needle = getNeedle(analyzer);
    int numFieldValues = 23;
    Directory directory = buildNeedleIndex(needle, analyzer, numFieldValues);
    IndexReader reader = DirectoryReader.open(directory);
    LeafReaderContext ctx = reader.leaves().get(0);
    LeafReader r = ctx.reader();
    PostingsEnum dpe = r.postings(new Term(FIELD, needle), PostingsEnum.ALL);
    int numTests = 0;
    try {
        while (dpe.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
            int frq = dpe.freq();
            int advanced = 0;
            String[] fieldValues = r.document(dpe.docID()).getValues(FIELD);
            while (++advanced < frq) {
                dpe.nextPosition();
                String rebuilt = SimpleAnalyzerUtil.substringFromMultiValuedFields(dpe.startOffset(),
                        dpe.endOffset(), fieldValues, analyzer.getOffsetGap(FIELD), " | ");
                assertEquals(needle, rebuilt);
                numTests++;
            }
        }
    } finally {
        reader.close();
        directory.close();
    }
    assertEquals("number of tests", numFieldValues - 1, numTests);
}
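Usage note: PostingsEnum.ALL is needed here because the test calls startOffset() and endOffset() for each position; a weaker flag such as PostingsEnum.POSITIONS would not guarantee that offsets are available. Note also that the inner while (++advanced < frq) loop visits one position fewer than freq(), which lines up with the final assertion of numFieldValues - 1 tests.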