List of usage examples for org.apache.lucene.index.SortedSetDocValues#lookupOrd
public abstract BytesRef lookupOrd(long ord) throws IOException;
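Before the full examples, here is a minimal sketch of the typical lookupOrd pattern: position the doc-values iterator on a document, walk its ordinals with nextOrd(), and resolve each ordinal to its term bytes. The reader, field name, and docid below are hypothetical placeholders, and the sketch assumes the iterator-style advanceExact()/nextOrd() API (Lucene 7+) that several of the examples below also use.

import java.io.IOException;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.SortedSetDocValues;
import org.apache.lucene.util.BytesRef;

public class LookupOrdSketch {
  /** Prints every distinct term stored for the given doc in a SORTED_SET doc-values field. */
  static void printTerms(LeafReader reader, String field, int docid) throws IOException {
    SortedSetDocValues dv = reader.getSortedSetDocValues(field);
    if (dv == null) {
      return; // the field has no SORTED_SET doc values in this segment
    }
    if (dv.advanceExact(docid)) {
      long ord;
      while ((ord = dv.nextOrd()) != SortedSetDocValues.NO_MORE_ORDS) {
        BytesRef term = dv.lookupOrd(ord); // resolve the ordinal to its term bytes
        System.out.println(term.utf8ToString());
      }
    }
  }
}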
From source file:org.alfresco.solr.transformer.DocValueDocTransformer.java
License:Open Source License
@Override
public void transform(SolrDocument doc, int docid, float score) throws IOException {
  for (String fieldName : context.getSearcher().getFieldNames()) {
    SchemaField schemaField = context.getSearcher().getSchema().getFieldOrNull(fieldName);
    if (schemaField != null) {
      if (schemaField.hasDocValues()) {
        SortedDocValues sortedDocValues = context.getSearcher().getSlowAtomicReader()
            .getSortedDocValues(fieldName);
        if (sortedDocValues != null) {
          int ordinal = sortedDocValues.getOrd(docid);
          if (ordinal > -1) {
            doc.removeFields(fieldName);
            String alfrescoFieldName = AlfrescoSolrDataModel.getInstance()
                .getAlfrescoPropertyFromSchemaField(fieldName);
            doc.removeFields(alfrescoFieldName);
            doc.addField(alfrescoFieldName,
                schemaField.getType().toObject(schemaField, sortedDocValues.lookupOrd(ordinal)));
          }
        }
        SortedSetDocValues sortedSetDocValues = context.getSearcher().getSlowAtomicReader()
            .getSortedSetDocValues(fieldName);
        if (sortedSetDocValues != null) {
          ArrayList<Object> newValues = new ArrayList<Object>();
          sortedSetDocValues.setDocument(docid);
          long ordinal;
          while ((ordinal = sortedSetDocValues.nextOrd()) != SortedSetDocValues.NO_MORE_ORDS) {
            newValues.add(schemaField.getType().toObject(schemaField, sortedSetDocValues.lookupOrd(ordinal)));
          }
          doc.removeFields(fieldName);
          String alfrescoFieldName = AlfrescoSolrDataModel.getInstance()
              .getAlfrescoPropertyFromSchemaField(fieldName);
          doc.removeFields(alfrescoFieldName);
          doc.addField(alfrescoFieldName, newValues);
        }
        BinaryDocValues binaryDocValues = context.getSearcher().getSlowAtomicReader()
            .getBinaryDocValues(fieldName);
        if (binaryDocValues != null) {
          doc.removeFields(fieldName);
          String alfrescoFieldName = AlfrescoSolrDataModel.getInstance()
              .getAlfrescoPropertyFromSchemaField(fieldName);
          doc.removeFields(alfrescoFieldName);
          doc.addField(alfrescoFieldName,
              schemaField.getType().toObject(schemaField, binaryDocValues.get(docid)));
        }
        if (schemaField.getType().getNumericType() != null) {
          NumericDocValues numericDocValues = context.getSearcher().getSlowAtomicReader()
              .getNumericDocValues(fieldName);
          if (numericDocValues != null) {
            doc.removeFields(fieldName);
            String alfrescoFieldName = AlfrescoSolrDataModel.getInstance()
                .getAlfrescoPropertyFromSchemaField(fieldName);
            doc.removeFields(alfrescoFieldName);
            switch (schemaField.getType().getNumericType()) {
            case DOUBLE:
              doc.addField(alfrescoFieldName, Double.longBitsToDouble(numericDocValues.get(docid)));
              break;
            case FLOAT:
              doc.addField(alfrescoFieldName, Float.intBitsToFloat((int) numericDocValues.get(docid)));
              break;
            case INT:
              doc.addField(alfrescoFieldName, (int) numericDocValues.get(docid));
              break;
            case LONG:
              doc.addField(alfrescoFieldName, numericDocValues.get(docid));
              break;
            }
          }
          SortedNumericDocValues sortedNumericDocValues = context.getSearcher().getSlowAtomicReader()
              .getSortedNumericDocValues(fieldName);
          if (sortedNumericDocValues != null) {
            sortedNumericDocValues.setDocument(docid);
            doc.removeFields(fieldName);
            String alfrescoFieldName = AlfrescoSolrDataModel.getInstance()
                .getAlfrescoPropertyFromSchemaField(fieldName);
            doc.removeFields(alfrescoFieldName);
            ArrayList<Object> newValues = new ArrayList<Object>(sortedNumericDocValues.count());
            if (sortedNumericDocValues.count() > 0) {
              for (int i = 0; i < sortedNumericDocValues.count(); i++) {
                switch (schemaField.getType().getNumericType()) {
                case DOUBLE:
                  newValues.add(NumericUtils.sortableLongToDouble(sortedNumericDocValues.valueAt(i)));
                  break;
                case FLOAT:
                  newValues.add(NumericUtils.sortableIntToFloat((int) sortedNumericDocValues.valueAt(i)));
                  break;
                case INT:
                  newValues.add((int) sortedNumericDocValues.valueAt(i));
                  break;
                case LONG:
                  newValues.add(sortedNumericDocValues.valueAt(i));
                  break;
                }
              }
            }
            doc.addField(alfrescoFieldName, newValues);
          }
        }
      }
    }
  }
}
From source file:org.apache.solr.request.DocValuesStats.java
License:Apache License
public static StatsValues getCounts(SolrIndexSearcher searcher, String fieldName, DocSet docs,
    boolean calcDistinct, String[] facet) throws IOException {
  SchemaField schemaField = searcher.getSchema().getField(fieldName);
  FieldType ft = schemaField.getType();
  StatsValues res = StatsValuesFactory.createStatsValues(schemaField, calcDistinct);

  // Initialize facetstats, if facets have been passed in
  final FieldFacetStats[] facetStats = new FieldFacetStats[facet.length];
  int upto = 0;
  for (String facetField : facet) {
    SchemaField fsf = searcher.getSchema().getField(facetField);
    if (fsf.multiValued()) {
      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
          "Stats can only facet on single-valued fields, not: " + facetField);
    }
    SchemaField facetSchemaField = searcher.getSchema().getField(facetField);
    facetStats[upto++] = new FieldFacetStats(searcher, facetField, schemaField, facetSchemaField, calcDistinct);
  }

  // TODO: remove multiValuedFieldCache(), check dv type / uninversion type?
  final boolean multiValued = schemaField.multiValued() || ft.multiValuedFieldCache();

  SortedSetDocValues si; // for term lookups only
  OrdinalMap ordinalMap = null; // for mapping per-segment ords to global ones
  if (multiValued) {
    si = searcher.getAtomicReader().getSortedSetDocValues(fieldName);
    if (si instanceof MultiSortedSetDocValues) {
      ordinalMap = ((MultiSortedSetDocValues) si).mapping;
    }
  } else {
    SortedDocValues single = searcher.getAtomicReader().getSortedDocValues(fieldName);
    si = single == null ? null : DocValues.singleton(single);
    if (single instanceof MultiSortedDocValues) {
      ordinalMap = ((MultiSortedDocValues) single).mapping;
    }
  }
  if (si == null) {
    si = DocValues.emptySortedSet();
  }
  if (si.getValueCount() >= Integer.MAX_VALUE) {
    throw new UnsupportedOperationException(
        "Currently this stats method is limited to " + Integer.MAX_VALUE + " unique terms");
  }

  int missingDocCountTotal = 0;
  final int nTerms = (int) si.getValueCount();

  // count collection array only needs to be as big as the number of terms we are
  // going to collect counts for.
  final int[] counts = new int[nTerms];

  Filter filter = docs.getTopFilter();
  List<AtomicReaderContext> leaves = searcher.getTopReaderContext().leaves();
  for (int subIndex = 0; subIndex < leaves.size(); subIndex++) {
    AtomicReaderContext leaf = leaves.get(subIndex);
    DocIdSet dis = filter.getDocIdSet(leaf, null); // solr docsets already exclude any deleted docs
    DocIdSetIterator disi = null;
    if (dis != null) {
      disi = dis.iterator();
    }
    if (disi != null) {
      int docBase = leaf.docBase;
      if (multiValued) {
        SortedSetDocValues sub = leaf.reader().getSortedSetDocValues(fieldName);
        if (sub == null) {
          sub = DocValues.emptySortedSet();
        }
        final SortedDocValues singleton = DocValues.unwrapSingleton(sub);
        if (singleton != null) {
          // some codecs may optimize SORTED_SET storage for single-valued fields
          missingDocCountTotal += accumSingle(counts, docBase, facetStats, singleton, disi, subIndex, ordinalMap);
        } else {
          missingDocCountTotal += accumMulti(counts, docBase, facetStats, sub, disi, subIndex, ordinalMap);
        }
      } else {
        SortedDocValues sub = leaf.reader().getSortedDocValues(fieldName);
        if (sub == null) {
          sub = DocValues.emptySorted();
        }
        missingDocCountTotal += accumSingle(counts, docBase, facetStats, sub, disi, subIndex, ordinalMap);
      }
    }
  }

  // add results in index order
  for (int ord = 0; ord < counts.length; ord++) {
    int count = counts[ord];
    if (count > 0) {
      final BytesRef value = si.lookupOrd(ord);
      res.accumulate(value, count);
      for (FieldFacetStats f : facetStats) {
        f.accumulateTermNum(ord, value);
      }
    }
  }

  res.addMissing(missingDocCountTotal);
  if (facetStats.length > 0) {
    for (FieldFacetStats f : facetStats) {
      Map<String, StatsValues> facetStatsValues = f.facetStatsValues;
      f.accumulateMissing();
      res.addFacet(f.name, facetStatsValues);
    }
  }
  return res;
}
From source file:org.apache.solr.search.SolrDocumentFetcher.java
License:Apache License
/**
 * This will fetch and add the docValues fields to a given SolrDocument/SolrInputDocument
 *
 * @param doc
 *          A SolrDocument or SolrInputDocument instance where docValues will be added
 * @param docid
 *          The lucene docid of the document to be populated
 * @param fields
 *          The list of docValues fields to be decorated
 */
public void decorateDocValueFields(@SuppressWarnings("rawtypes") SolrDocumentBase doc, int docid,
    Set<String> fields) throws IOException {
  final List<LeafReaderContext> leafContexts = searcher.getLeafContexts();
  final int subIndex = ReaderUtil.subIndex(docid, leafContexts);
  final int localId = docid - leafContexts.get(subIndex).docBase;
  final LeafReader leafReader = leafContexts.get(subIndex).reader();
  for (String fieldName : fields) {
    final SchemaField schemaField = searcher.getSchema().getFieldOrNull(fieldName);
    if (schemaField == null || !schemaField.hasDocValues() || doc.containsKey(fieldName)) {
      log.warn("Couldn't decorate docValues for field: [{}], schemaField: [{}]", fieldName, schemaField);
      continue;
    }
    FieldInfo fi = searcher.getFieldInfos().fieldInfo(fieldName);
    if (fi == null) {
      continue; // Searcher doesn't have info about this field, hence ignore it.
    }
    final DocValuesType dvType = fi.getDocValuesType();
    switch (dvType) {
    case NUMERIC:
      final NumericDocValues ndv = leafReader.getNumericDocValues(fieldName);
      if (ndv == null) {
        continue;
      }
      Long val;
      if (ndv.advanceExact(localId)) {
        val = ndv.longValue();
      } else {
        continue;
      }
      Object newVal = val;
      if (schemaField.getType().isPointField()) {
        // TODO: Maybe merge PointField with TrieFields here
        NumberType type = schemaField.getType().getNumberType();
        switch (type) {
        case INTEGER:
          newVal = val.intValue();
          break;
        case LONG:
          newVal = val.longValue();
          break;
        case FLOAT:
          newVal = Float.intBitsToFloat(val.intValue());
          break;
        case DOUBLE:
          newVal = Double.longBitsToDouble(val);
          break;
        case DATE:
          newVal = new Date(val);
          break;
        default:
          throw new AssertionError("Unexpected PointType: " + type);
        }
      } else {
        if (schemaField.getType() instanceof TrieIntField) {
          newVal = val.intValue();
        } else if (schemaField.getType() instanceof TrieFloatField) {
          newVal = Float.intBitsToFloat(val.intValue());
        } else if (schemaField.getType() instanceof TrieDoubleField) {
          newVal = Double.longBitsToDouble(val);
        } else if (schemaField.getType() instanceof TrieDateField) {
          newVal = new Date(val);
        } else if (schemaField.getType() instanceof EnumField) {
          newVal = ((EnumField) schemaField.getType()).intValueToStringValue(val.intValue());
        }
      }
      doc.addField(fieldName, newVal);
      break;
    case BINARY:
      BinaryDocValues bdv = leafReader.getBinaryDocValues(fieldName);
      if (bdv == null) {
        continue;
      }
      BytesRef value;
      if (bdv.advanceExact(localId)) {
        value = BytesRef.deepCopyOf(bdv.binaryValue());
      } else {
        continue;
      }
      doc.addField(fieldName, value);
      break;
    case SORTED:
      SortedDocValues sdv = leafReader.getSortedDocValues(fieldName);
      if (sdv == null) {
        continue;
      }
      if (sdv.advanceExact(localId)) {
        final BytesRef bRef = sdv.binaryValue();
        // Special handling for Boolean fields since they're stored as 'T' and 'F'.
        if (schemaField.getType() instanceof BoolField) {
          doc.addField(fieldName, schemaField.getType().toObject(schemaField, bRef));
        } else {
          doc.addField(fieldName, bRef.utf8ToString());
        }
      }
      break;
    case SORTED_NUMERIC:
      final SortedNumericDocValues numericDv = leafReader.getSortedNumericDocValues(fieldName);
      NumberType type = schemaField.getType().getNumberType();
      if (numericDv != null) {
        if (numericDv.advance(localId) == localId) {
          final List<Object> outValues = new ArrayList<Object>(numericDv.docValueCount());
          for (int i = 0; i < numericDv.docValueCount(); i++) {
            long number = numericDv.nextValue();
            switch (type) {
            case INTEGER:
              outValues.add((int) number);
              break;
            case LONG:
              outValues.add(number);
              break;
            case FLOAT:
              outValues.add(NumericUtils.sortableIntToFloat((int) number));
              break;
            case DOUBLE:
              outValues.add(NumericUtils.sortableLongToDouble(number));
              break;
            case DATE:
              outValues.add(new Date(number));
              break;
            default:
              throw new AssertionError("Unexpected PointType: " + type);
            }
          }
          assert outValues.size() > 0;
          doc.addField(fieldName, outValues);
        }
      }
      break;
    case SORTED_SET:
      final SortedSetDocValues values = leafReader.getSortedSetDocValues(fieldName);
      if (values != null && values.getValueCount() > 0) {
        if (values.advance(localId) == localId) {
          final List<Object> outValues = new LinkedList<>();
          for (long ord = values.nextOrd(); ord != SortedSetDocValues.NO_MORE_ORDS; ord = values.nextOrd()) {
            value = values.lookupOrd(ord);
            outValues.add(schemaField.getType().toObject(schemaField, value));
          }
          assert outValues.size() > 0;
          doc.addField(fieldName, outValues);
        }
      }
      break;
    case NONE:
      break;
    }
  }
}
From source file:org.apache.solr.uninverting.TestDocTermOrds.java
License:Apache License
public void testNumericEncoded32() throws IOException {
  Directory dir = newDirectory();
  IndexWriter iw = new IndexWriter(dir, newIndexWriterConfig(null));

  Document doc = new Document();
  doc.add(new LegacyIntField("foo", 5, Field.Store.NO));
  iw.addDocument(doc);

  doc = new Document();
  doc.add(new LegacyIntField("foo", 5, Field.Store.NO));
  doc.add(new LegacyIntField("foo", -3, Field.Store.NO));
  iw.addDocument(doc);

  iw.forceMerge(1);
  iw.close();

  DirectoryReader ir = DirectoryReader.open(dir);
  LeafReader ar = getOnlyLeafReader(ir);

  SortedSetDocValues v = FieldCache.DEFAULT.getDocTermOrds(ar, "foo", FieldCache.INT32_TERM_PREFIX);
  assertEquals(2, v.getValueCount());

  assertEquals(0, v.nextDoc());
  assertEquals(1, v.nextOrd());
  assertEquals(SortedSetDocValues.NO_MORE_ORDS, v.nextOrd());

  assertEquals(1, v.nextDoc());
  assertEquals(0, v.nextOrd());
  assertEquals(1, v.nextOrd());
  assertEquals(SortedSetDocValues.NO_MORE_ORDS, v.nextOrd());

  BytesRef value = v.lookupOrd(0);
  assertEquals(-3, LegacyNumericUtils.prefixCodedToInt(value));
  value = v.lookupOrd(1);
  assertEquals(5, LegacyNumericUtils.prefixCodedToInt(value));

  ir.close();
  dir.close();
}
From source file:org.apache.solr.uninverting.TestDocTermOrds.java
License:Apache License
public void testNumericEncoded64() throws IOException {
  Directory dir = newDirectory();
  IndexWriter iw = new IndexWriter(dir, newIndexWriterConfig(null));

  Document doc = new Document();
  doc.add(new LegacyLongField("foo", 5, Field.Store.NO));
  iw.addDocument(doc);

  doc = new Document();
  doc.add(new LegacyLongField("foo", 5, Field.Store.NO));
  doc.add(new LegacyLongField("foo", -3, Field.Store.NO));
  iw.addDocument(doc);

  iw.forceMerge(1);
  iw.close();

  DirectoryReader ir = DirectoryReader.open(dir);
  LeafReader ar = getOnlyLeafReader(ir);

  SortedSetDocValues v = FieldCache.DEFAULT.getDocTermOrds(ar, "foo", FieldCache.INT64_TERM_PREFIX);
  assertEquals(2, v.getValueCount());

  assertEquals(0, v.nextDoc());
  assertEquals(1, v.nextOrd());
  assertEquals(SortedSetDocValues.NO_MORE_ORDS, v.nextOrd());

  assertEquals(1, v.nextDoc());
  assertEquals(0, v.nextOrd());
  assertEquals(1, v.nextOrd());
  assertEquals(SortedSetDocValues.NO_MORE_ORDS, v.nextOrd());

  BytesRef value = v.lookupOrd(0);
  assertEquals(-3, LegacyNumericUtils.prefixCodedToLong(value));
  value = v.lookupOrd(1);
  assertEquals(5, LegacyNumericUtils.prefixCodedToLong(value));

  ir.close();
  dir.close();
}
From source file:org.apache.solr.uninverting.TestDocTermOrds.java
License:Apache License
public void testActuallySingleValued() throws IOException {
  Directory dir = newDirectory();
  IndexWriterConfig iwconfig = newIndexWriterConfig(null);
  iwconfig.setMergePolicy(newLogMergePolicy());
  IndexWriter iw = new IndexWriter(dir, iwconfig);

  Document doc = new Document();
  doc.add(new StringField("foo", "bar", Field.Store.NO));
  iw.addDocument(doc);

  doc = new Document();
  doc.add(new StringField("foo", "baz", Field.Store.NO));
  iw.addDocument(doc);

  doc = new Document();
  iw.addDocument(doc);

  doc = new Document();
  doc.add(new StringField("foo", "baz", Field.Store.NO));
  doc.add(new StringField("foo", "baz", Field.Store.NO));
  iw.addDocument(doc);

  iw.forceMerge(1);
  iw.close();

  DirectoryReader ir = DirectoryReader.open(dir);
  LeafReader ar = getOnlyLeafReader(ir);

  SortedSetDocValues v = FieldCache.DEFAULT.getDocTermOrds(ar, "foo", null);
  assertNotNull(DocValues.unwrapSingleton(v)); // actually a single-valued field
  assertEquals(2, v.getValueCount());

  assertEquals(0, v.nextDoc());
  assertEquals(0, v.nextOrd());
  assertEquals(SortedSetDocValues.NO_MORE_ORDS, v.nextOrd());

  assertEquals(1, v.nextDoc());
  assertEquals(1, v.nextOrd());
  assertEquals(SortedSetDocValues.NO_MORE_ORDS, v.nextOrd());

  assertEquals(3, v.nextDoc());
  assertEquals(1, v.nextOrd());
  assertEquals(SortedSetDocValues.NO_MORE_ORDS, v.nextOrd());

  BytesRef value = v.lookupOrd(0);
  assertEquals("bar", value.utf8ToString());
  value = v.lookupOrd(1);
  assertEquals("baz", value.utf8ToString());

  ir.close();
  dir.close();
}
From source file:org.apache.solr.uninverting.TestFieldCache.java
License:Apache License
public void test() throws IOException {
  FieldCache cache = FieldCache.DEFAULT;

  NumericDocValues doubles = cache.getNumerics(reader, "theDouble", FieldCache.DOUBLE_POINT_PARSER);
  for (int i = 0; i < NUM_DOCS; i++) {
    assertEquals(i, doubles.nextDoc());
    assertEquals(Double.doubleToLongBits(Double.MAX_VALUE - i), doubles.longValue());
  }

  NumericDocValues longs = cache.getNumerics(reader, "theLong", FieldCache.LONG_POINT_PARSER);
  for (int i = 0; i < NUM_DOCS; i++) {
    assertEquals(i, longs.nextDoc());
    assertEquals(Long.MAX_VALUE - i, longs.longValue());
  }

  NumericDocValues ints = cache.getNumerics(reader, "theInt", FieldCache.INT_POINT_PARSER);
  for (int i = 0; i < NUM_DOCS; i++) {
    assertEquals(i, ints.nextDoc());
    assertEquals(Integer.MAX_VALUE - i, ints.longValue());
  }

  NumericDocValues floats = cache.getNumerics(reader, "theFloat", FieldCache.FLOAT_POINT_PARSER);
  for (int i = 0; i < NUM_DOCS; i++) {
    assertEquals(i, floats.nextDoc());
    assertEquals(Float.floatToIntBits(Float.MAX_VALUE - i), floats.longValue());
  }

  Bits docsWithField = cache.getDocsWithField(reader, "theLong", FieldCache.LONG_POINT_PARSER);
  assertSame("Second request to cache return same array", docsWithField,
      cache.getDocsWithField(reader, "theLong", FieldCache.LONG_POINT_PARSER));
  assertTrue("docsWithField(theLong) must be class Bits.MatchAllBits",
      docsWithField instanceof Bits.MatchAllBits);
  assertTrue("docsWithField(theLong) Size: " + docsWithField.length() + " is not: " + NUM_DOCS,
      docsWithField.length() == NUM_DOCS);
  for (int i = 0; i < docsWithField.length(); i++) {
    assertTrue(docsWithField.get(i));
  }

  docsWithField = cache.getDocsWithField(reader, "sparse", FieldCache.INT_POINT_PARSER);
  assertSame("Second request to cache return same array", docsWithField,
      cache.getDocsWithField(reader, "sparse", FieldCache.INT_POINT_PARSER));
  assertFalse("docsWithField(sparse) must not be class Bits.MatchAllBits",
      docsWithField instanceof Bits.MatchAllBits);
  assertTrue("docsWithField(sparse) Size: " + docsWithField.length() + " is not: " + NUM_DOCS,
      docsWithField.length() == NUM_DOCS);
  for (int i = 0; i < docsWithField.length(); i++) {
    assertEquals(i % 2 == 0, docsWithField.get(i));
  }

  // getTermsIndex
  SortedDocValues termsIndex = cache.getTermsIndex(reader, "theRandomUnicodeString");
  for (int i = 0; i < NUM_DOCS; i++) {
    final String s;
    if (i > termsIndex.docID()) {
      termsIndex.advance(i);
    }
    if (i == termsIndex.docID()) {
      s = termsIndex.binaryValue().utf8ToString();
    } else {
      s = null;
    }
    assertTrue("for doc " + i + ": " + s + " does not equal: " + unicodeStrings[i],
        unicodeStrings[i] == null || unicodeStrings[i].equals(s));
  }

  int nTerms = termsIndex.getValueCount();

  TermsEnum tenum = termsIndex.termsEnum();
  for (int i = 0; i < nTerms; i++) {
    BytesRef val1 = BytesRef.deepCopyOf(tenum.next());
    final BytesRef val = termsIndex.lookupOrd(i);
    assertEquals(val, val1);
  }

  // seek the enum around (note this isn't a great test here)
  int num = atLeast(100);
  for (int i = 0; i < num; i++) {
    int k = random().nextInt(nTerms);
    final BytesRef val = BytesRef.deepCopyOf(termsIndex.lookupOrd(k));
    assertEquals(TermsEnum.SeekStatus.FOUND, tenum.seekCeil(val));
    assertEquals(val, tenum.term());
  }

  for (int i = 0; i < nTerms; i++) {
    final BytesRef val = BytesRef.deepCopyOf(termsIndex.lookupOrd(i));
    assertEquals(TermsEnum.SeekStatus.FOUND, tenum.seekCeil(val));
    assertEquals(val, tenum.term());
  }

  // test bad field
  termsIndex = cache.getTermsIndex(reader, "bogusfield");

  // getTerms
  BinaryDocValues terms = cache.getTerms(reader, "theRandomUnicodeString");
  for (int i = 0; i < NUM_DOCS; i++) {
    if (terms.docID() < i) {
      terms.nextDoc();
    }
    if (terms.docID() == i) {
      assertEquals(unicodeStrings[i], terms.binaryValue().utf8ToString());
    } else {
      assertNull(unicodeStrings[i]);
    }
  }

  // test bad field
  terms = cache.getTerms(reader, "bogusfield");

  // getDocTermOrds
  SortedSetDocValues termOrds = cache.getDocTermOrds(reader, "theRandomUnicodeMultiValuedField", null);
  int numEntries = cache.getCacheEntries().length;
  // ask for it again, and check that we didnt create any additional entries:
  termOrds = cache.getDocTermOrds(reader, "theRandomUnicodeMultiValuedField", null);
  assertEquals(numEntries, cache.getCacheEntries().length);

  for (int i = 0; i < NUM_DOCS; i++) {
    // This will remove identical terms. A DocTermOrds doesn't return duplicate ords for a docId
    List<BytesRef> values = new ArrayList<>(new LinkedHashSet<>(Arrays.asList(multiValued[i])));
    for (BytesRef v : values) {
      if (v == null) {
        // why does this test use null values... instead of an empty list: confusing
        break;
      }
      if (i > termOrds.docID()) {
        assertEquals(i, termOrds.nextDoc());
      }
      long ord = termOrds.nextOrd();
      assert ord != SortedSetDocValues.NO_MORE_ORDS;
      BytesRef scratch = termOrds.lookupOrd(ord);
      assertEquals(v, scratch);
    }
    if (i == termOrds.docID()) {
      assertEquals(SortedSetDocValues.NO_MORE_ORDS, termOrds.nextOrd());
    }
  }

  // test bad field
  termOrds = cache.getDocTermOrds(reader, "bogusfield", null);
  assertTrue(termOrds.getValueCount() == 0);

  FieldCache.DEFAULT.purgeByCacheKey(reader.getCoreCacheKey());
}
From source file:org.apache.solr.uninverting.TestFieldCacheVsDocValues.java
License:Apache License
private void assertEquals(int maxDoc, SortedSetDocValues expected, SortedSetDocValues actual) throws Exception {
  // can be null for the segment if no docs actually had any SortedDocValues
  // in this case FC.getDocTermsOrds returns EMPTY
  if (actual == null) {
    assertEquals(expected.getValueCount(), 0);
    return;
  }
  assertEquals(expected.getValueCount(), actual.getValueCount());

  while (true) {
    int docID = expected.nextDoc();
    assertEquals(docID, actual.nextDoc());
    if (docID == NO_MORE_DOCS) {
      break;
    }
    long expectedOrd;
    while ((expectedOrd = expected.nextOrd()) != NO_MORE_ORDS) {
      assertEquals(expectedOrd, actual.nextOrd());
    }
    assertEquals(NO_MORE_ORDS, actual.nextOrd());
  }

  // compare ord dictionary
  for (long i = 0; i < expected.getValueCount(); i++) {
    final BytesRef expectedBytes = BytesRef.deepCopyOf(expected.lookupOrd(i));
    final BytesRef actualBytes = actual.lookupOrd(i);
    assertEquals(expectedBytes, actualBytes);
  }

  // compare termsenum
  assertEquals(expected.getValueCount(), expected.termsEnum(), actual.termsEnum());
}
From source file:org.apache.solr.uninverting.TestUninvertingReader.java
License:Apache License
public void testSortedSetInteger() throws IOException {
  Directory dir = newDirectory();
  IndexWriter iw = new IndexWriter(dir, newIndexWriterConfig(null));

  Document doc = new Document();
  doc.add(new LegacyIntField("foo", 5, Field.Store.NO));
  iw.addDocument(doc);

  doc = new Document();
  doc.add(new LegacyIntField("foo", 5, Field.Store.NO));
  doc.add(new LegacyIntField("foo", -3, Field.Store.NO));
  iw.addDocument(doc);

  iw.forceMerge(1);
  iw.close();

  DirectoryReader ir = UninvertingReader.wrap(DirectoryReader.open(dir),
      Collections.singletonMap("foo", Type.SORTED_SET_INTEGER));
  LeafReader ar = ir.leaves().get(0).reader();

  SortedSetDocValues v = ar.getSortedSetDocValues("foo");
  assertEquals(2, v.getValueCount());

  assertEquals(0, v.nextDoc());
  assertEquals(1, v.nextOrd());
  assertEquals(SortedSetDocValues.NO_MORE_ORDS, v.nextOrd());

  assertEquals(1, v.nextDoc());
  assertEquals(0, v.nextOrd());
  assertEquals(1, v.nextOrd());
  assertEquals(SortedSetDocValues.NO_MORE_ORDS, v.nextOrd());

  BytesRef value = v.lookupOrd(0);
  assertEquals(-3, LegacyNumericUtils.prefixCodedToInt(value));
  value = v.lookupOrd(1);
  assertEquals(5, LegacyNumericUtils.prefixCodedToInt(value));

  TestUtil.checkReader(ir);
  ir.close();
  dir.close();
}
From source file:org.apache.solr.uninverting.TestUninvertingReader.java
License:Apache License
public void testSortedSetFloat() throws IOException {
  Directory dir = newDirectory();
  IndexWriter iw = new IndexWriter(dir, newIndexWriterConfig(null));

  Document doc = new Document();
  doc.add(new LegacyIntField("foo", Float.floatToRawIntBits(5f), Field.Store.NO));
  iw.addDocument(doc);

  doc = new Document();
  doc.add(new LegacyIntField("foo", Float.floatToRawIntBits(5f), Field.Store.NO));
  doc.add(new LegacyIntField("foo", Float.floatToRawIntBits(-3f), Field.Store.NO));
  iw.addDocument(doc);

  iw.forceMerge(1);
  iw.close();

  DirectoryReader ir = UninvertingReader.wrap(DirectoryReader.open(dir),
      Collections.singletonMap("foo", Type.SORTED_SET_FLOAT));
  LeafReader ar = ir.leaves().get(0).reader();

  SortedSetDocValues v = ar.getSortedSetDocValues("foo");
  assertEquals(2, v.getValueCount());

  assertEquals(0, v.nextDoc());
  assertEquals(1, v.nextOrd());
  assertEquals(SortedSetDocValues.NO_MORE_ORDS, v.nextOrd());

  assertEquals(1, v.nextDoc());
  assertEquals(0, v.nextOrd());
  assertEquals(1, v.nextOrd());
  assertEquals(SortedSetDocValues.NO_MORE_ORDS, v.nextOrd());

  BytesRef value = v.lookupOrd(0);
  assertEquals(Float.floatToRawIntBits(-3f), LegacyNumericUtils.prefixCodedToInt(value));
  value = v.lookupOrd(1);
  assertEquals(Float.floatToRawIntBits(5f), LegacyNumericUtils.prefixCodedToInt(value));

  TestUtil.checkReader(ir);
  ir.close();
  dir.close();
}