List of usage examples for org.apache.lucene.index.LeafReader#getFieldInfos
public abstract FieldInfos getFieldInfos();
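Returns the FieldInfos describing every field known to this leaf (segment-level) reader.

Before the project examples below, here is a minimal, hypothetical sketch of the call (the class name ListFieldInfos and the index path are placeholders, not taken from any of the examples): it opens an index, walks the segment leaves, and prints the metadata that getFieldInfos() exposes for each field.

import java.io.IOException;
import java.nio.file.Paths;

import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;

public class ListFieldInfos {

    public static void main(String[] args) throws IOException {
        // "/path/to/index" is a placeholder; point it at an existing Lucene index.
        try (Directory dir = FSDirectory.open(Paths.get("/path/to/index"));
             DirectoryReader reader = DirectoryReader.open(dir)) {
            for (LeafReaderContext ctx : reader.leaves()) {
                LeafReader leaf = ctx.reader();
                // getFieldInfos() describes every field present in this segment.
                for (FieldInfo info : leaf.getFieldInfos()) {
                    System.out.println(info.name
                            + " docValues=" + info.getDocValuesType()
                            + " indexOptions=" + info.getIndexOptions());
                }
            }
        }
    }
}

The project examples that follow show the same call used for field-level security filtering, doc-values plumbing, and Solr schema introspection.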
From source file: com.floragunn.searchguard.configuration.DlsFlsFilterLeafReader.java
License: Open Source License

DlsFlsFilterLeafReader(final LeafReader delegate, final Set<String> includes, final Query dlsQuery) {
    super(delegate);
    flsEnabled = includes != null && !includes.isEmpty();
    dlsEnabled = dlsQuery != null;

    if (flsEnabled) {
        this.includes = includes.toArray(new String[0]);

        final FieldInfos infos = delegate.getFieldInfos();
        final List<FieldInfo> fi = new ArrayList<FieldInfo>(infos.size());

        for (final FieldInfo info : infos) {
            final String fname = info.name;
            if ((!WildcardMatcher.containsWildcard(fname) && includes.contains(fname))
                    || WildcardMatcher.matchAny(this.includes, fname)) {
                fi.add(info);
            }
        }

        this.flsFieldInfos = new FieldInfos(fi.toArray(new FieldInfo[0]));
    } else {
        this.includes = null;
        this.flsFieldInfos = null;
    }

    if (dlsEnabled) {
        try {
            // borrowed from Apache Lucene (Copyright Apache Software Foundation (ASF))
            final IndexSearcher searcher = new IndexSearcher(this);
            searcher.setQueryCache(null);
            final boolean needsScores = false;
            final Weight preserveWeight = searcher.createNormalizedWeight(dlsQuery, needsScores);

            final int maxDoc = in.maxDoc();
            final FixedBitSet bits = new FixedBitSet(maxDoc);
            final Scorer preverveScorer = preserveWeight.scorer(this.getContext());

            if (preverveScorer != null) {
                bits.or(preverveScorer.iterator());
            }

            if (in.hasDeletions()) {
                final Bits oldLiveDocs = in.getLiveDocs();
                assert oldLiveDocs != null;
                final DocIdSetIterator it = new BitSetIterator(bits, 0L);
                for (int i = it.nextDoc(); i != DocIdSetIterator.NO_MORE_DOCS; i = it.nextDoc()) {
                    if (!oldLiveDocs.get(i)) {
                        bits.clear(i);
                    }
                }
            }

            this.liveDocs = bits;
            this.numDocs = bits.cardinality();
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    } else {
        this.liveDocs = null;
        this.numDocs = -1;
    }
}
From source file: com.qwazr.search.field.FieldTypeAbstract.java
License: Apache License

public ValueConverter getConverter(final LeafReader leafReader) throws IOException {
    FieldInfos fieldInfos = leafReader.getFieldInfos();
    if (fieldInfos == null)
        return null;
    FieldInfo fieldInfo = fieldInfos.fieldInfo(fieldName);
    if (fieldInfo == null)
        return null;
    return ValueConverter.newConverter(fieldDef, leafReader, fieldInfo);
}
From source file: com.qwazr.search.index.FunctionCollector.java
License: Apache License

@Override
public LeafCollector getLeafCollector(LeafReaderContext context) throws IOException {
    final LeafReader leafReader = context.reader();
    FieldInfo fieldInfo = leafReader.getFieldInfos().fieldInfo(function.field);
    ValueConverter converter = fieldType.getConverter(leafReader);
    if (converter == null)
        return DoNothingCollector.INSTANCE;
    switch (function.function) {
    case max:
        if (converter.isNumeric)
            return new MaxNumericFunctionCollector(converter);
        else
            return new MaxBinaryFunctionCollector(converter);
    case min:
        if (converter.isNumeric)
            return new MinNumericFunctionCollector(converter);
        else
            return new MinBinaryFunctionCollector(converter);
    default:
        throw new IOException("Unknown function for field " + function.field);
    }
}
From source file: org.apache.solr.handler.component.AlfrescoLukeRequestHandler.java
License: Open Source License

private static SimpleOrderedMap<Object> getIndexedFieldsInfo(SolrQueryRequest req) throws Exception {
    SolrIndexSearcher searcher = req.getSearcher();
    SolrParams params = req.getParams();

    Set<String> fields = null;
    String fl = params.get(CommonParams.FL);
    if (fl != null) {
        fields = new TreeSet<>(Arrays.asList(fl.split("[,\\s]+")));
    }

    LeafReader reader = searcher.getSlowAtomicReader();
    IndexSchema schema = searcher.getSchema();

    // Don't be tempted to put this in the loop below, the whole point here
    // is to alphabetize the fields!
    Set<String> fieldNames = new TreeSet<>();
    for (FieldInfo fieldInfo : reader.getFieldInfos()) {
        fieldNames.add(fieldInfo.name);
    }

    // Walk the term enum and keep a priority queue for each map in our set
    SimpleOrderedMap<Object> vInfo = new SimpleOrderedMap<>();
    SimpleOrderedMap<Object> aInfo = new SimpleOrderedMap<>();

    for (String fieldName : fieldNames) {
        if (fields != null && !fields.contains(fieldName) && !fields.contains("*")) {
            continue; // we're not interested in this field. Still an issue here
        }

        SimpleOrderedMap<Object> fieldMap = new SimpleOrderedMap<>();

        SchemaField sfield = schema.getFieldOrNull(fieldName);
        FieldType ftype = (sfield == null) ? null : sfield.getType();

        fieldMap.add("type", (ftype == null) ? null : ftype.getTypeName());
        fieldMap.add("schema", getFieldFlags(sfield));
        if (sfield != null && schema.isDynamicField(sfield.getName())
                && schema.getDynamicPattern(sfield.getName()) != null) {
            fieldMap.add("dynamicBase", schema.getDynamicPattern(sfield.getName()));
        }
        Terms terms = reader.fields().terms(fieldName);
        if (terms == null) {
            // Not indexed, so we need to report what we can
            // (it made it through the fl param if specified)
            vInfo.add(AlfrescoSolrDataModel.getInstance().getAlfrescoPropertyFromSchemaField(fieldName),
                    fieldMap);
            aInfo.add(fieldName, fieldMap);
            continue;
        }

        if (sfield != null && sfield.indexed()) {
            if (params.getBool(INCLUDE_INDEX_FIELD_FLAGS, true)) {
                Document doc = getFirstLiveDoc(terms, reader);

                if (doc != null) {
                    // Found a document with this field
                    try {
                        IndexableField fld = doc.getField(fieldName);
                        if (fld != null) {
                            fieldMap.add("index", getFieldFlags(fld));
                        } else {
                            // it is a non-stored field...
                            fieldMap.add("index", "(unstored field)");
                        }
                    } catch (Exception ex) {
                        log.warn("error reading field: " + fieldName);
                    }
                }
            }
            fieldMap.add("docs", terms.getDocCount());
        }
        if (fields != null && (fields.contains(fieldName) || fields.contains("*"))) {
            getDetailedFieldInfo(req, fieldName, fieldMap);
        }
        // Add the field
        vInfo.add(fieldName, fieldMap);
        aInfo.add(AlfrescoSolrDataModel.getInstance().getAlfrescoPropertyFromSchemaField(fieldName), fieldMap);
    }

    SimpleOrderedMap<Object> finfo = new SimpleOrderedMap<>();
    finfo.addAll(vInfo);
    // finfo.add("mimetype()", finfo.get("cm:content.mimetype"));
    // finfo.add("contentSize()", finfo.get("cm:content.size"));
    finfo.addAll(aInfo);
    return finfo;
}
From source file: org.apache.solr.index.SlowCompositeReaderWrapper.java
License: Apache License

@Override
public SortedDocValues getSortedDocValues(String field) throws IOException {
    ensureOpen();
    OrdinalMap map = null;
    synchronized (cachedOrdMaps) {
        map = cachedOrdMaps.get(field);
        if (map == null) {
            // uncached, or not a multi dv
            SortedDocValues dv = MultiDocValues.getSortedValues(in, field);
            if (dv instanceof MultiSortedDocValues) {
                map = ((MultiSortedDocValues) dv).mapping;
                if (map.owner == getCoreCacheKey() && merging == false) {
                    cachedOrdMaps.put(field, map);
                }
            }
            return dv;
        }
    }
    int size = in.leaves().size();
    final SortedDocValues[] values = new SortedDocValues[size];
    final int[] starts = new int[size + 1];
    long totalCost = 0;
    for (int i = 0; i < size; i++) {
        LeafReaderContext context = in.leaves().get(i);
        final LeafReader reader = context.reader();
        final FieldInfo fieldInfo = reader.getFieldInfos().fieldInfo(field);
        if (fieldInfo != null && fieldInfo.getDocValuesType() != DocValuesType.SORTED) {
            return null;
        }
        SortedDocValues v = reader.getSortedDocValues(field);
        if (v == null) {
            v = DocValues.emptySorted();
        }
        totalCost += v.cost();
        values[i] = v;
        starts[i] = context.docBase;
    }
    starts[size] = maxDoc();
    return new MultiSortedDocValues(values, starts, map, totalCost);
}
From source file: org.apache.solr.index.SlowCompositeReaderWrapper.java
License: Apache License

@Override
public SortedSetDocValues getSortedSetDocValues(String field) throws IOException {
    ensureOpen();
    OrdinalMap map = null;
    synchronized (cachedOrdMaps) {
        map = cachedOrdMaps.get(field);
        if (map == null) {
            // uncached, or not a multi dv
            SortedSetDocValues dv = MultiDocValues.getSortedSetValues(in, field);
            if (dv instanceof MultiDocValues.MultiSortedSetDocValues) {
                map = ((MultiDocValues.MultiSortedSetDocValues) dv).mapping;
                if (map.owner == getCoreCacheKey() && merging == false) {
                    cachedOrdMaps.put(field, map);
                }
            }
            return dv;
        }
    }
    assert map != null;
    int size = in.leaves().size();
    final SortedSetDocValues[] values = new SortedSetDocValues[size];
    final int[] starts = new int[size + 1];
    long cost = 0;
    for (int i = 0; i < size; i++) {
        LeafReaderContext context = in.leaves().get(i);
        final LeafReader reader = context.reader();
        final FieldInfo fieldInfo = reader.getFieldInfos().fieldInfo(field);
        if (fieldInfo != null && fieldInfo.getDocValuesType() != DocValuesType.SORTED_SET) {
            return null;
        }
        SortedSetDocValues v = reader.getSortedSetDocValues(field);
        if (v == null) {
            v = DocValues.emptySortedSet();
        }
        values[i] = v;
        starts[i] = context.docBase;
        cost += v.cost();
    }
    starts[size] = maxDoc();
    return new MultiDocValues.MultiSortedSetDocValues(values, starts, map, cost);
}
From source file: org.apache.solr.index.UninvertDocValuesMergePolicyTest.java
License: Apache License

public void testIndexAndAddDocValues() throws Exception {
    Random rand = random();

    for (int i = 0; i < 100; i++) {
        assertU(adoc(ID_FIELD, String.valueOf(i), TEST_FIELD, String.valueOf(i)));
        if (rand.nextBoolean()) {
            assertU(commit());
        }
    }
    assertU(commit());

    // Assert everything has been indexed and there are no docvalues
    withNewRawReader(h, topReader -> {
        assertEquals(100, topReader.numDocs());

        final FieldInfos infos = MultiFields.getMergedFieldInfos(topReader);
        // The global field type should not have docValues yet
        assertEquals(DocValuesType.NONE, infos.fieldInfo(TEST_FIELD).getDocValuesType());
    });

    addDocValuesTo(h, TEST_FIELD);

    // Add some more documents with doc values turned on including updating some
    for (int i = 90; i < 110; i++) {
        assertU(adoc(ID_FIELD, String.valueOf(i), TEST_FIELD, String.valueOf(i)));
        if (rand.nextBoolean()) {
            assertU(commit());
        }
    }
    assertU(commit());

    withNewRawReader(h, topReader -> {
        assertEquals(110, topReader.numDocs());

        final FieldInfos infos = MultiFields.getMergedFieldInfos(topReader);
        // The global field type should have docValues because a document with dvs was added
        assertEquals(DocValuesType.SORTED, infos.fieldInfo(TEST_FIELD).getDocValuesType());
    });

    int optimizeSegments = 1;
    assertU(optimize("maxSegments", String.valueOf(optimizeSegments)));

    // Assert all docs have the right docvalues
    withNewRawReader(h, topReader -> {
        // Assert merged into one segment
        assertEquals(110, topReader.numDocs());
        assertEquals(optimizeSegments, topReader.leaves().size());

        final FieldInfos infos = MultiFields.getMergedFieldInfos(topReader);
        // The global field type should have docValues because a document with dvs was added
        assertEquals(DocValuesType.SORTED, infos.fieldInfo(TEST_FIELD).getDocValuesType());

        // Check that all segments have the right docvalues type with the correct value
        // Also check that other fields (e.g. the id field) didn't mistakenly get docvalues added
        for (LeafReaderContext ctx : topReader.leaves()) {
            LeafReader r = ctx.reader();
            SortedDocValues docvalues = r.getSortedDocValues(TEST_FIELD);
            for (int i = 0; i < r.numDocs(); ++i) {
                Document doc = r.document(i);
                String v = doc.getField(TEST_FIELD).stringValue();
                String id = doc.getField(ID_FIELD).stringValue();
                assertEquals(DocValuesType.SORTED, r.getFieldInfos().fieldInfo(TEST_FIELD).getDocValuesType());
                assertEquals(DocValuesType.NONE, r.getFieldInfos().fieldInfo(ID_FIELD).getDocValuesType());
                assertEquals(v, id);

                docvalues.nextDoc();
                assertEquals(v, docvalues.binaryValue().utf8ToString());
            }
        }
    });
}
From source file: org.apache.solr.index.UninvertDocValuesMergePolicyTest.java
License: Apache License

public void testNonIndexedFieldDoesNonFail() throws Exception {
    // Remove Indexed from fieldType
    removeIndexFrom(h, TEST_FIELD);

    assertU(adoc(ID_FIELD, String.valueOf(1), TEST_FIELD, String.valueOf(1)));
    assertU(commit());

    addDocValuesTo(h, TEST_FIELD);

    assertU(adoc(ID_FIELD, String.valueOf(2), TEST_FIELD, String.valueOf(2)));
    assertU(commit());

    assertU(optimize("maxSegments", "1"));

    withNewRawReader(h, topReader -> {
        // Assert merged into one segment
        assertEquals(2, topReader.numDocs());
        assertEquals(1, topReader.leaves().size());

        final FieldInfos infos = MultiFields.getMergedFieldInfos(topReader);
        // The global field type should have docValues because a document with dvs was added
        assertEquals(DocValuesType.SORTED, infos.fieldInfo(TEST_FIELD).getDocValuesType());

        for (LeafReaderContext ctx : topReader.leaves()) {
            LeafReader r = ctx.reader();
            SortedDocValues docvalues = r.getSortedDocValues(TEST_FIELD);
            for (int i = 0; i < r.numDocs(); ++i) {
                Document doc = r.document(i);
                String v = doc.getField(TEST_FIELD).stringValue();
                String id = doc.getField(ID_FIELD).stringValue();
                assertEquals(DocValuesType.SORTED, r.getFieldInfos().fieldInfo(TEST_FIELD).getDocValuesType());
                assertEquals(DocValuesType.NONE, r.getFieldInfos().fieldInfo(ID_FIELD).getDocValuesType());

                if (id.equals("2")) {
                    assertTrue(docvalues.advanceExact(i));
                    assertEquals(v, docvalues.binaryValue().utf8ToString());
                } else {
                    assertFalse(docvalues.advanceExact(i));
                }
            }
        }
    });
}
From source file: org.apache.solr.schema.TestHalfAndHalfDocValues.java
License: Apache License

public void testHalfAndHalfDocValues() throws Exception {
    // Insert two docs without docvalues
    String fieldname = "string_add_dv_later";
    assertU(adoc("id", "3", fieldname, "c"));
    assertU(commit());
    assertU(adoc("id", "1", fieldname, "a"));
    assertU(commit());

    try (SolrCore core = h.getCoreInc()) {
        assertFalse(core.getLatestSchema().getField(fieldname).hasDocValues());

        // Add docvalues to the field type
        IndexSchema schema = core.getLatestSchema();
        SchemaField oldField = schema.getField(fieldname);
        int newProperties = oldField.getProperties() | SchemaField.DOC_VALUES;

        SchemaField sf = new SchemaField(fieldname, oldField.getType(), newProperties, null);
        schema.getFields().put(fieldname, sf);

        // Insert a new doc with docvalues
        assertU(adoc("id", "2", fieldname, "b"));
        assertU(commit());

        // Check there are a mix of segments with and without docvalues
        final RefCounted<SolrIndexSearcher> searcherRef = core.openNewSearcher(true, true);
        final SolrIndexSearcher searcher = searcherRef.get();
        try {
            final DirectoryReader topReader = searcher.getRawReader();

            // Assert no merges
            assertEquals(3, topReader.numDocs());
            assertEquals(3, topReader.leaves().size());

            final FieldInfos infos = MultiFields.getMergedFieldInfos(topReader);
            // The global field type should have docValues because a document with dvs was added
            assertEquals(DocValuesType.SORTED, infos.fieldInfo(fieldname).getDocValuesType());

            for (LeafReaderContext ctx : topReader.leaves()) {
                LeafReader r = ctx.reader();
                // Make sure there were no merges
                assertEquals(1, r.numDocs());
                Document doc = r.document(0);
                String id = doc.getField("id").stringValue();
                if (id.equals("1") || id.equals("3")) {
                    assertEquals(DocValuesType.NONE, r.getFieldInfos().fieldInfo(fieldname).getDocValuesType());
                } else {
                    assertEquals(DocValuesType.SORTED, r.getFieldInfos().fieldInfo(fieldname).getDocValuesType());
                }
            }
        } finally {
            searcherRef.decref();
        }
    }

    // Assert sort order is correct
    assertQ(req("q", "string_add_dv_later:*", "sort", "string_add_dv_later asc"),
            "//*[@numFound='3']",
            "//result/doc[1]/int[@name='id'][.=1]",
            "//result/doc[2]/int[@name='id'][.=2]",
            "//result/doc[3]/int[@name='id'][.=3]");
}
From source file: org.apache.solr.schema.TestSortableTextField.java
License: Apache License

public void testWhiteboxIndexReader() throws Exception {
    assertU(adoc("id", "1",
            "whitespace_stxt", "how now brown cow ?",
            "whitespace_m_stxt", "xxx",
            "whitespace_m_stxt", "yyy",
            "whitespace_f_stxt", "aaa bbb",
            "keyword_stxt", "Blarggghhh!"));
    assertU(commit());

    final RefCounted<SolrIndexSearcher> searcher = h.getCore().getNewestSearcher(false);
    try {
        final LeafReader r = searcher.get().getSlowAtomicReader();

        // common cases...
        for (String field : Arrays.asList("keyword_stxt", "keyword_dv_stxt",
                "whitespace_stxt", "whitespace_f_stxt", "whitespace_l_stxt")) {
            assertNotNull("FieldInfos: " + field, r.getFieldInfos().fieldInfo(field));
            assertEquals("DocValuesType: " + field, DocValuesType.SORTED,
                    r.getFieldInfos().fieldInfo(field).getDocValuesType());
            assertNotNull("DocValues: " + field, r.getSortedDocValues(field));
            assertNotNull("Terms: " + field, r.terms(field));
        }

        // special cases...
        assertNotNull(r.getFieldInfos().fieldInfo("whitespace_nodv_stxt"));
        assertEquals(DocValuesType.NONE,
                r.getFieldInfos().fieldInfo("whitespace_nodv_stxt").getDocValuesType());
        assertNull(r.getSortedDocValues("whitespace_nodv_stxt"));
        assertNotNull(r.terms("whitespace_nodv_stxt"));
        //
        assertNotNull(r.getFieldInfos().fieldInfo("whitespace_nois_stxt"));
        assertEquals(DocValuesType.SORTED,
                r.getFieldInfos().fieldInfo("whitespace_nois_stxt").getDocValuesType());
        assertNotNull(r.getSortedDocValues("whitespace_nois_stxt"));
        assertNull(r.terms("whitespace_nois_stxt"));
        //
        assertNotNull(r.getFieldInfos().fieldInfo("whitespace_m_stxt"));
        assertEquals(DocValuesType.SORTED_SET,
                r.getFieldInfos().fieldInfo("whitespace_m_stxt").getDocValuesType());
        assertNotNull(r.getSortedSetDocValues("whitespace_m_stxt"));
        assertNotNull(r.terms("whitespace_m_stxt"));

    } finally {
        if (null != searcher) {
            searcher.decref();
        }
    }
}