Example usage for org.apache.lucene.search SortField FIELD_DOC

List of usage examples for org.apache.lucene.search SortField FIELD_DOC

Introduction

In this page you can find the example usage for org.apache.lucene.search SortField FIELD_DOC.

Prototype

SortField FIELD_DOC

Click the Source Link below to view the source code for org.apache.lucene.search SortField.FIELD_DOC.

Document

Represents sorting by document number (index order).

Usage

From source file:com.bewsia.script.safe.lucene.SEntity.java

License:Open Source License

/** Returns the Lucene sort field that orders results by document number (index order). */
public SortField sortFieldDoc() {
    return SortField.FIELD_DOC;
}

From source file:net.sf.logsaw.index.internal.LuceneIndexServiceImpl.java

License:Open Source License

/**
 * Executes a paged search against the Lucene index of the context's log resource.
 * <p>
 * Hits are returned in index (docid) order, i.e. the order in which the log
 * entries were written to the index.
 *
 * @param context the query context; must be an open {@code LuceneQueryContextImpl}
 * @param restrictions the restrictions to convert into the Lucene query
 * @param offset zero-based offset of the first hit to return
 * @param limit maximum number of hits in the returned page
 * @return the result page, including the total hit count
 * @throws CoreException if the index cannot be read
 */
@Override
public ResultPage query(IQueryContext context, final List<ARestriction<?>> restrictions, final int offset,
        final int limit) throws CoreException {
    // Precondition checks: fail fast on misuse.
    Assert.isNotNull(context, "context"); //$NON-NLS-1$
    Assert.isTrue(context instanceof LuceneQueryContextImpl,
            "Query context must be of type net.sf.logsaw.index.impl.LuceneQueryContextImpl"); //$NON-NLS-1$
    Assert.isTrue(context.isOpen(), "Query context must be open"); //$NON-NLS-1$
    Assert.isNotNull(restrictions, "restrictions"); //$NON-NLS-1$

    ARunWithIndexReader<ResultPage> runnable = new ARunWithIndexReader<ResultPage>() {

        /* (non-Javadoc)
         * @see net.sf.logsaw.index.impl.ARunWithIndexReader#doRunWithIndexReader(org.apache.lucene.index.IndexReader, net.sf.logsaw.core.framework.ILogResource)
         */
        @Override
        protected ResultPage doRunWithIndexReader(IndexReader reader, ILogResource log) throws CoreException {
            if (reader == null) {
                // Index does not exist yet
                return new ResultPage();
            }

            try {
                IndexSearcher searcher = new IndexSearcher(reader);
                // Sort by docid so entries come back in index (insertion) order.
                Sort sort = new Sort(new SortField[] { SortField.FIELD_DOC });
                // Collect offset + limit hits so the first 'offset' can be skipped below.
                TopFieldCollector collector = TopFieldCollector.create(sort, offset + limit, false, false,
                        false, true);
                // TODO Investigate use of searchAfter
                searcher.search(convertToQuery(restrictions), collector);
                List<LogEntry> result = new LinkedList<LogEntry>();
                collectHits(searcher, collector.topDocs(offset), log.getDialect(), result);
                return new ResultPage(result, offset, collector.getTotalHits());
            } catch (IOException e) {
                // Unexpected exception; wrap with CoreException
                throw new CoreException(new Status(IStatus.ERROR, IndexPlugin.PLUGIN_ID,
                        NLS.bind(Messages.LuceneIndexService_error_failedToReadIndex,
                                new Object[] { log.getName(), e.getLocalizedMessage() }),
                        e));
            }
        }
    };
    runnable.setQueryContext((LuceneQueryContextImpl) context);
    return runnable.runWithIndexReader(context.getLogResource());
}

From source file:org.apache.solr.uninverting.TestFieldCacheSort.java

License:Apache License

/** Tests sorting on internal docid order */
/** Verifies that sorting by SortField.FIELD_DOC returns hits in internal docid order. */
public void testFieldDoc() throws Exception {
    Directory dir = newDirectory();
    RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
    for (String value : new String[] { "foo", "bar" }) {
        Document doc = new Document();
        doc.add(newStringField("value", value, Field.Store.NO));
        writer.addDocument(doc);
    }
    IndexReader ir = writer.getReader();
    writer.close();

    Sort byDocId = new Sort(SortField.FIELD_DOC);
    IndexSearcher searcher = newSearcher(ir);
    TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, byDocId);

    assertEquals(2, td.totalHits);
    // Hits must come back in insertion (docid) order regardless of field values.
    assertEquals(0, td.scoreDocs[0].doc);
    assertEquals(1, td.scoreDocs[1].doc);
    TestUtil.checkReader(ir);
    ir.close();
    dir.close();
}

From source file:org.apache.solr.uninverting.TestFieldCacheSort.java

License:Apache License

/** test that we throw exception on multi-valued field, creates corrupt reader, use SORTED_SET instead */
/**
 * Sorting a multi-valued field via UninvertingReader with Type.SORTED must throw
 * IllegalStateException (the reader view is corrupt; SORTED_SET should be used instead).
 */
public void testMultiValuedField() throws IOException {
    Directory indexStore = newDirectory();
    IndexWriter writer = new IndexWriter(indexStore, newIndexWriterConfig(new MockAnalyzer(random())));
    for (int i = 0; i < 5; i++) {
        Document doc = new Document();
        // Two values per document make the "string" field multi-valued.
        doc.add(new StringField("string", "a" + i, Field.Store.NO));
        doc.add(new StringField("string", "b" + i, Field.Store.NO));
        writer.addDocument(doc);
    }
    // enforce one segment to have a higher unique term count in all cases
    writer.forceMerge(1);
    writer.close();

    IndexReader reader = UninvertingReader.wrap(DirectoryReader.open(indexStore),
            Collections.singletonMap("string", Type.SORTED));
    IndexSearcher searcher = new IndexSearcher(reader);
    Sort sort = new Sort(new SortField("string", SortField.Type.STRING), SortField.FIELD_DOC);
    expectThrows(IllegalStateException.class, () -> searcher.search(new MatchAllDocsQuery(), 500, sort));
    reader.close();
    indexStore.close();
}

From source file:org.apache.solr.uninverting.TestFieldCacheSort.java

License:Apache License

/** test sorts when there's nothing in the index */
/** Sorting on an empty index must not fail and must always return zero hits. */
public void testEmptyIndex() throws Exception {
    IndexSearcher empty = newSearcher(new MultiReader());
    Query query = new TermQuery(new Term("contents", "foo"));
    Sort sort = new Sort();

    // Default (relevance) sort.
    assertEquals(0, empty.search(query, 10, sort, true, true).totalHits);

    // Docid order.
    sort.setSort(SortField.FIELD_DOC);
    assertEquals(0, empty.search(query, 10, sort, true, true).totalHits);

    // Int field with docid tie-breaker.
    sort.setSort(new SortField("int", SortField.Type.INT), SortField.FIELD_DOC);
    assertEquals(0, empty.search(query, 10, sort, true, true).totalHits);

    // Reversed string field with docid tie-breaker.
    sort.setSort(new SortField("string", SortField.Type.STRING, true), SortField.FIELD_DOC);
    assertEquals(0, empty.search(query, 10, sort, true, true).totalHits);

    // Reversed string-val field with docid tie-breaker.
    sort.setSort(new SortField("string_val", SortField.Type.STRING_VAL, true), SortField.FIELD_DOC);
    assertEquals(0, empty.search(query, 10, sort, true, true).totalHits);

    // Two-field sort without docid tie-breaker.
    sort.setSort(new SortField("float", SortField.Type.FLOAT), new SortField("string", SortField.Type.STRING));
    assertEquals(0, empty.search(query, 10, sort, true, true).totalHits);
}

From source file:org.apache.solr.uninverting.TestFieldCacheSortRandom.java

License:Apache License

/**
 * Randomized test for sorting on a string field, optionally with FIELD_DOC as a
 * tie-breaker. Indexes random string values (about 10% of documents have no
 * value), runs random queries, and compares the returned hit order against an
 * independently sorted list of the expected values.
 *
 * @param type the string sort type to exercise (e.g. STRING or STRING_VAL)
 */
private void testRandomStringSort(SortField.Type type) throws Exception {
    Random random = new Random(random().nextLong());

    final int NUM_DOCS = atLeast(100);
    final Directory dir = newDirectory();
    final RandomIndexWriter writer = new RandomIndexWriter(random, dir);
    final boolean allowDups = random.nextBoolean();
    final Set<String> seen = new HashSet<>();
    final int maxLength = TestUtil.nextInt(random, 5, 100);
    if (VERBOSE) {
        System.out
                .println("TEST: NUM_DOCS=" + NUM_DOCS + " maxLength=" + maxLength + " allowDups=" + allowDups);
    }

    int numDocs = 0;
    final List<BytesRef> docValues = new ArrayList<>();
    // TODO: deletions
    while (numDocs < NUM_DOCS) {
        final Document doc = new Document();

        // 10% of the time, the document is missing the value:
        // NOTE(review): 'br' is only assigned in the else branch and never read
        // afterwards — this looks like dead code left over from a refactoring.
        final BytesRef br;
        if (random().nextInt(10) != 7) {
            final String s;
            if (random.nextBoolean()) {
                s = TestUtil.randomSimpleString(random, maxLength);
            } else {
                s = TestUtil.randomUnicodeString(random, maxLength);
            }

            // Without duplicates, skip values already indexed.
            if (!allowDups) {
                if (seen.contains(s)) {
                    continue;
                }
                seen.add(s);
            }

            if (VERBOSE) {
                System.out.println("  " + numDocs + ": s=" + s);
            }

            doc.add(new StringField("stringdv", s, Field.Store.NO));
            docValues.add(new BytesRef(s));

        } else {
            br = null;
            if (VERBOSE) {
                System.out.println("  " + numDocs + ": <missing>");
            }
            // null marks a document with no value.
            docValues.add(null);
        }

        // Stored id lets the verbose output map hits back to documents.
        doc.add(new IntPoint("id", numDocs));
        doc.add(new StoredField("id", numDocs));
        writer.addDocument(doc);
        numDocs++;

        if (random.nextInt(40) == 17) {
            // force flush
            writer.getReader().close();
        }
    }

    Map<String, UninvertingReader.Type> mapping = new HashMap<>();
    mapping.put("stringdv", Type.SORTED);
    mapping.put("id", Type.INTEGER_POINT);
    final IndexReader r = UninvertingReader.wrap(writer.getReader(), mapping);
    writer.close();
    if (VERBOSE) {
        System.out.println("  reader=" + r);
    }

    final IndexSearcher s = newSearcher(r, false);
    final int ITERS = atLeast(100);
    for (int iter = 0; iter < ITERS; iter++) {
        final boolean reverse = random.nextBoolean();

        final TopFieldDocs hits;
        final SortField sf;
        final boolean sortMissingLast;
        final boolean missingIsNull;
        sf = new SortField("stringdv", type, reverse);
        sortMissingLast = random().nextBoolean();
        missingIsNull = true;

        if (sortMissingLast) {
            sf.setMissingValue(SortField.STRING_LAST);
        }

        // Half the time add FIELD_DOC as a tie-breaker.
        final Sort sort;
        if (random.nextBoolean()) {
            sort = new Sort(sf);
        } else {
            sort = new Sort(sf, SortField.FIELD_DOC);
        }
        final int hitCount = TestUtil.nextInt(random, 1, r.maxDoc() + 20);
        final RandomQuery f = new RandomQuery(random.nextLong(), random.nextFloat(), docValues);
        int queryType = random.nextInt(2);
        if (queryType == 0) {
            hits = s.search(new ConstantScoreQuery(f), hitCount, sort, random.nextBoolean(),
                    random.nextBoolean());
        } else {
            hits = s.search(f, hitCount, sort, random.nextBoolean(), random.nextBoolean());
        }

        if (VERBOSE) {
            System.out.println("\nTEST: iter=" + iter + " " + hits.totalHits + " hits; topN=" + hitCount
                    + "; reverse=" + reverse + "; sortMissingLast=" + sortMissingLast + " sort=" + sort);
        }

        // Compute expected results: sort the matched values ourselves, placing
        // missing (null) values first or last per sortMissingLast.
        Collections.sort(f.matchValues, new Comparator<BytesRef>() {
            @Override
            public int compare(BytesRef a, BytesRef b) {
                if (a == null) {
                    if (b == null) {
                        return 0;
                    }
                    if (sortMissingLast) {
                        return 1;
                    } else {
                        return -1;
                    }
                } else if (b == null) {
                    if (sortMissingLast) {
                        return -1;
                    } else {
                        return 1;
                    }
                } else {
                    return a.compareTo(b);
                }
            }
        });

        if (reverse) {
            Collections.reverse(f.matchValues);
        }
        final List<BytesRef> expected = f.matchValues;
        if (VERBOSE) {
            System.out.println("  expected:");
            for (int idx = 0; idx < expected.size(); idx++) {
                BytesRef br = expected.get(idx);
                if (br == null && missingIsNull == false) {
                    br = new BytesRef();
                }
                System.out.println("    " + idx + ": " + (br == null ? "<missing>" : br.utf8ToString()));
                if (idx == hitCount - 1) {
                    break;
                }
            }
        }

        if (VERBOSE) {
            System.out.println("  actual:");
            for (int hitIDX = 0; hitIDX < hits.scoreDocs.length; hitIDX++) {
                final FieldDoc fd = (FieldDoc) hits.scoreDocs[hitIDX];
                BytesRef br = (BytesRef) fd.fields[0];

                System.out.println("    " + hitIDX + ": " + (br == null ? "<missing>" : br.utf8ToString())
                        + " id=" + s.doc(fd.doc).get("id"));
            }
        }
        // Compare expected values against the sort values actually returned.
        for (int hitIDX = 0; hitIDX < hits.scoreDocs.length; hitIDX++) {
            final FieldDoc fd = (FieldDoc) hits.scoreDocs[hitIDX];
            BytesRef br = expected.get(hitIDX);
            if (br == null && missingIsNull == false) {
                br = new BytesRef();
            }

            // Normally, the old codecs (that don't support
            // docsWithField via doc values) will always return
            // an empty BytesRef for the missing case; however,
            // if all docs in a given segment were missing, in
            // that case it will return null!  So we must map
            // null here, too:
            BytesRef br2 = (BytesRef) fd.fields[0];
            if (br2 == null && missingIsNull == false) {
                br2 = new BytesRef();
            }

            assertEquals(br, br2);
        }
    }

    r.close();
    dir.close();
}

From source file:org.elasticsearch.index.query.TermsSetQueryBuilderTests.java

License:Apache License

/** Exercises TermsSetQueryBuilder with the minimum-should-match value read from a doc-values field. */
public void testDoToQuery() throws Exception {
    try (Directory directory = newDirectory()) {
        IndexWriterConfig config = new IndexWriterConfig(new WhitespaceAnalyzer());
        config.setMergePolicy(NoMergePolicy.INSTANCE);
        // One entry per document: message text and its m_s_m doc value.
        String[] messages = { "a b", "a b c", "a b c", "a b c d", "a b c d", "a b c d" };
        long[] minShouldMatch = { 1, 1, 2, 1, 2, 3 };
        try (IndexWriter iw = new IndexWriter(directory, config)) {
            for (int i = 0; i < messages.length; i++) {
                Document document = new Document();
                document.add(new TextField("message", messages[i], Field.Store.NO));
                document.add(new SortedNumericDocValuesField("m_s_m", minShouldMatch[i]));
                iw.addDocument(document);
            }
        }

        try (IndexReader ir = DirectoryReader.open(directory)) {
            QueryShardContext context = createShardContext();
            Query query = new TermsSetQueryBuilder("message", Arrays.asList("c", "d"))
                    .setMinimumShouldMatchField("m_s_m").doToQuery(context);
            IndexSearcher searcher = new IndexSearcher(ir);
            // Docid sort makes the expected hit order deterministic.
            TopDocs topDocs = searcher.search(query, 10, new Sort(SortField.FIELD_DOC));
            assertThat(topDocs.totalHits, equalTo(3L));
            assertThat(topDocs.scoreDocs[0].doc, equalTo(1));
            assertThat(topDocs.scoreDocs[1].doc, equalTo(3));
            assertThat(topDocs.scoreDocs[2].doc, equalTo(4));
        }
    }
}

From source file:org.elasticsearch.index.query.TermsSetQueryBuilderTests.java

License:Apache License

/** Exercises TermsSetQueryBuilder with the minimum-should-match value supplied by a script. */
public void testDoToQuery_msmScriptField() throws Exception {
    try (Directory directory = newDirectory()) {
        IndexWriterConfig config = new IndexWriterConfig(new WhitespaceAnalyzer());
        config.setMergePolicy(NoMergePolicy.INSTANCE);
        // One entry per document: message text and its m_s_m doc value.
        String[] messages = { "a b x y", "a b x y", "a b c x", "a b c x", "a b c d" };
        long[] minShouldMatch = { 50, 75, 75, 100, 100 };
        try (IndexWriter iw = new IndexWriter(directory, config)) {
            for (int i = 0; i < messages.length; i++) {
                Document document = new Document();
                document.add(new TextField("message", messages[i], Field.Store.NO));
                document.add(new SortedNumericDocValuesField("m_s_m", minShouldMatch[i]));
                iw.addDocument(document);
            }
        }

        try (IndexReader ir = DirectoryReader.open(directory)) {
            QueryShardContext context = createShardContext();
            Script script = new Script(ScriptType.INLINE, MockScriptEngine.NAME, "_script",
                    Collections.emptyMap());
            Query query = new TermsSetQueryBuilder("message", Arrays.asList("a", "b", "c", "d"))
                    .setMinimumShouldMatchScript(script).doToQuery(context);
            IndexSearcher searcher = new IndexSearcher(ir);
            // Docid sort makes the expected hit order deterministic.
            TopDocs topDocs = searcher.search(query, 10, new Sort(SortField.FIELD_DOC));
            assertThat(topDocs.totalHits, equalTo(3L));
            assertThat(topDocs.scoreDocs[0].doc, equalTo(0));
            assertThat(topDocs.scoreDocs[1].doc, equalTo(2));
            assertThat(topDocs.scoreDocs[2].doc, equalTo(4));
        }
    }
}

From source file:org.elasticsearch.percolator.PercolatorMatchedSlotSubFetchPhase.java

License:Apache License

/**
 * For every percolate query in the search, records which percolated documents
 * (slots) matched each hit, storing the slot numbers in a document field on
 * the hit.
 *
 * @param context the current search context
 * @param hits the search hits to augment
 * @throws IOException if reading from an index fails
 */
@Override
public void hitsExecute(SearchContext context, SearchHit[] hits) throws IOException {
    List<PercolateQuery> percolateQueries = locatePercolatorQuery(context.query());
    if (percolateQueries.isEmpty()) {
        return;
    }

    // With a single percolate query the field name needs no per-query suffix.
    boolean singlePercolateQuery = percolateQueries.size() == 1;
    for (PercolateQuery percolateQuery : percolateQueries) {
        String fieldName = singlePercolateQuery ? FIELD_NAME_PREFIX
                : FIELD_NAME_PREFIX + "_" + percolateQuery.getName();
        IndexSearcher percolatorIndexSearcher = percolateQuery.getPercolatorIndexSearcher();
        // Collect the root (non-nested) documents of the percolator's index.
        Weight weight = percolatorIndexSearcher.createNormalizedWeight(Queries.newNonNestedFilter(), false);
        Scorer s = weight.scorer(percolatorIndexSearcher.getIndexReader().leaves().get(0));
        int memoryIndexMaxDoc = percolatorIndexSearcher.getIndexReader().maxDoc();
        BitSet rootDocs = BitSet.of(s.iterator(), memoryIndexMaxDoc);
        int[] rootDocsBySlot = null;
        // Nested docs exist when not every document in that index is a root doc.
        boolean hasNestedDocs = rootDocs.cardinality() != percolatorIndexSearcher.getIndexReader().numDocs();
        if (hasNestedDocs) {
            rootDocsBySlot = buildRootDocsSlots(rootDocs);
        }

        PercolateQuery.QueryStore queryStore = percolateQuery.getQueryStore();
        List<LeafReaderContext> ctxs = context.searcher().getIndexReader().leaves();
        for (SearchHit hit : hits) {
            // Resolve the hit's segment and its segment-local doc id.
            LeafReaderContext ctx = ctxs.get(ReaderUtil.subIndex(hit.docId(), ctxs));
            int segmentDocId = hit.docId() - ctx.docBase;
            Query query = queryStore.getQueries(ctx).apply(segmentDocId);

            // Run the stored percolator query against the percolator's index;
            // docid sort keeps slot order deterministic.
            TopDocs topDocs = percolatorIndexSearcher.search(query, memoryIndexMaxDoc,
                    new Sort(SortField.FIELD_DOC));
            if (topDocs.totalHits == 0) {
                // This hit didn't match with a percolate query,
                // likely to happen when percolating multiple documents
                continue;
            }

            Map<String, DocumentField> fields = hit.fieldsOrNull();
            if (fields == null) {
                fields = new HashMap<>();
                hit.fields(fields);
            }
            IntStream slots = convertTopDocsToSlots(topDocs, rootDocsBySlot);
            fields.put(fieldName, new DocumentField(fieldName, slots.boxed().collect(Collectors.toList())));
        }
    }
}

From source file:org.hibernate.search.backend.lucene.search.sort.impl.IndexOrderSortContributor.java

License:LGPL

/** Contributes a docid-order (index order) sort field to the given collector. */
@Override
public void buildAndAddTo(LuceneSearchSortCollector collector) {
    collector.collectSortField(SortField.FIELD_DOC);
}