Example usage for org.apache.lucene.search IndexSearcher search

Introduction

This page collects example usages of the org.apache.lucene.search.IndexSearcher.search method.

Prototype

public <C extends Collector, T> T search(Query query, CollectorManager<C, T> collectorManager)
        throws IOException 

Documentation

Lower-level search API.
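
The usage examples below all call the higher-level search(Query, int) convenience method rather than the overload shown in the prototype. For reference, here is a minimal sketch of the CollectorManager form; it is not taken from any of the source files below and assumes Lucene 5.1 or later, where CollectorManager was introduced. Each index slice gets its own collector from newCollector(), and reduce() merges the per-slice results into the final value.

import java.io.IOException;
import java.util.Collection;

import org.apache.lucene.search.CollectorManager;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TotalHitCountCollector;

public final class HitCountManager
        implements CollectorManager<TotalHitCountCollector, Integer> {

    @Override
    public TotalHitCountCollector newCollector() {
        // One collector per index slice; slices may be searched in parallel.
        return new TotalHitCountCollector();
    }

    @Override
    public Integer reduce(Collection<TotalHitCountCollector> collectors) throws IOException {
        // Merge the per-slice hit counts into a single total.
        int total = 0;
        for (TotalHitCountCollector c : collectors) {
            total += c.getTotalHits();
        }
        return total;
    }

    // Usage: int hits = searcher.search(query, new HitCountManager());
    public static int countHits(IndexSearcher searcher, Query query) throws IOException {
        return searcher.search(query, new HitCountManager());
    }
}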

Usage

From source file:choco.lucene.IKAnalyzerDemo.java

License:Apache License

public static void main(String[] args) {
    // Lucene Document field name
    String fieldName = "text";
    // Sample Chinese text to index
    String text = "IK Analyzer是一个结合词典分词和文法分词的中文分词开源工具包。它使用了全新的正向迭代最细粒度切分算法。";
    // Create the IKAnalyzer instance
    Analyzer analyzer = new IKAnalyzer();
    Directory directory = null;
    IndexWriter iwriter = null;
    IndexReader ireader = null;
    IndexSearcher isearcher = null;
    try {
        // Create an in-memory index directory
        directory = new RAMDirectory();
        // Configure the IndexWriter
        IndexWriterConfig iwConfig = new IndexWriterConfig(Version.LUCENE_34, analyzer);
        iwConfig.setOpenMode(OpenMode.CREATE_OR_APPEND);
        iwriter = new IndexWriter(directory, iwConfig);
        // Build and index a document
        Document doc = new Document();
        doc.add(new Field("ID", "10000", Field.Store.YES, Field.Index.NOT_ANALYZED));
        doc.add(new Field(fieldName, text, Field.Store.YES, Field.Index.ANALYZED));
        iwriter.addDocument(doc);
        iwriter.close();
        //**********************************
        // Search phase
        ireader = IndexReader.open(directory);
        isearcher = new IndexSearcher(ireader);
        String keyword = "中文分词工具包";
        // Build a Query with QueryParser
        QueryParser qp = new QueryParser(Version.LUCENE_34, fieldName, analyzer);
        qp.setDefaultOperator(QueryParser.AND_OPERATOR);
        Query query = qp.parse(keyword);
        // Retrieve the top 5 hits
        TopDocs topDocs = isearcher.search(query, 5);
        System.out.println("Total hits: " + topDocs.totalHits);
        // Print the matching documents; iterate over the returned ScoreDocs,
        // not totalHits, since only 5 documents were requested
        ScoreDoc[] scoreDocs = topDocs.scoreDocs;
        for (int i = 0; i < scoreDocs.length; i++) {
            Document targetDoc = isearcher.doc(scoreDocs[i].doc);
            System.out.println("Document: " + targetDoc.toString());
        }
        }
    } catch (CorruptIndexException e) {
        e.printStackTrace();
    } catch (LockObtainFailedException e) {
        e.printStackTrace();
    } catch (IOException e) {
        e.printStackTrace();
    } catch (ParseException e) {
        e.printStackTrace();
    } finally {
        if (ireader != null) {
            try {
                ireader.close();
            } catch (IOException e) {
                e.printStackTrace();
            }
        }
        if (directory != null) {
            try {
                directory.close();
            } catch (IOException e) {
                e.printStackTrace();
            }
        }
    }
}
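
The demo above targets Lucene 3.4, whose APIs (RAMDirectory, Field.Index, IndexReader.open) have since been removed. As a hedged sketch of the same index-then-search flow on a recent Lucene (assuming 8.x or later, where ByteBuffersDirectory replaces RAMDirectory, and with StandardAnalyzer standing in for the IK-specific pieces), try-with-resources replaces the manual finally cleanup:

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.StringField;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.queryparser.classic.QueryParser;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.ByteBuffersDirectory;
import org.apache.lucene.store.Directory;

public class ModernSearchDemo {
    public static void main(String[] args) throws Exception {
        Analyzer analyzer = new StandardAnalyzer();
        try (Directory directory = new ByteBuffersDirectory()) {
            // Index one document; the writer closes automatically.
            try (IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(analyzer))) {
                Document doc = new Document();
                doc.add(new StringField("ID", "10000", Field.Store.YES));
                doc.add(new TextField("text", "sample text to search", Field.Store.YES));
                writer.addDocument(doc);
            }
            // Search it back; the reader closes automatically.
            try (DirectoryReader reader = DirectoryReader.open(directory)) {
                IndexSearcher searcher = new IndexSearcher(reader);
                Query query = new QueryParser("text", analyzer).parse("sample");
                TopDocs topDocs = searcher.search(query, 5);
                for (ScoreDoc sd : topDocs.scoreDocs) {
                    System.out.println(searcher.doc(sd.doc));
                }
            }
        }
    }
}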

From source file:ci6226.eval_index_reader.java

public static void Searchit(IndexReader reader, IndexSearcher searcher, Analyzer _analyzer, String field,
        String[] _searchList, int _topn, PrintWriter writer)
        throws org.apache.lucene.queryparser.classic.ParseException, IOException, InvalidTokenOffsetsException {
    Analyzer analyzer = _analyzer;

    QueryParser parser = new QueryParser(Version.LUCENE_47, field, analyzer);

    String[] testString = _searchList;//{"to","123","impressed","Geezer","geezer","semi-busy","\"eggs vegetable\"","gs veget","\"gs veget\""};//,"good","I","but","coffee"};

    for (int j = 0; j < testString.length; j++) {
        String lstr = String.valueOf(j) + "," + testString[j];
        Query query = parser.parse(testString[j]);
        System.out.println("Searching for: " + query.toString(field));
        TopDocs topdocs = searcher.search(query, _topn);
        lstr += "," + topdocs.totalHits;
        ScoreDoc[] scoreDocs = topdocs.scoreDocs;
        SimpleHTMLFormatter htmlFormatter = new SimpleHTMLFormatter();
        Highlighter highlighter = new Highlighter(htmlFormatter, new QueryScorer(query.rewrite(reader)));
        for (int i = 0; i < scoreDocs.length; i++) {
            int doc = scoreDocs[i].doc;
            Document document = searcher.doc(doc);
            //      System.out.println("Snippet=" + document.get(field));
            System.out.println(i);
            String text = document.get(field);
            TokenStream tokenStream = TokenSources.getAnyTokenStream(searcher.getIndexReader(), doc, field,
                    analyzer);
            TextFragment[] frag = highlighter.getBestTextFragments(tokenStream, text, false, 10);//highlighter.getBestFragments(tokenStream, text, 3, "...");
            String line = "";
            for (int m = 0; m < frag.length; m++) {

                if ((frag[m] != null) && (frag[m].getScore() > 0)) {
                    System.out.println((frag[m].toString()));
                    line = frag[m].toString();
                    line = line.replaceAll("\n", "");
                    line = line.replaceAll("\r", "");
                    line = line.replaceAll("\"", "");
                    line = line.replaceAll(",", " ");

                }

            }
            lstr += "," + line;
            lstr += "," + String.valueOf(scoreDocs[i].score);

        }
        writer.write(lstr + "\n");
        System.out.println("Search for:" + testString[j] + " Total hits=" + scoreDocs.length);
        System.out.println("////////////////////////////////////////////////////");
    }

}
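
Searchit writes one comma-separated line per query, so a caller only needs to supply a reader, a searcher, an analyzer, and a PrintWriter. A hypothetical driver (the index path, field name, and query list below are assumptions, not part of the original source; it would live in the same ci6226 package) might look like:

import java.io.File;
import java.io.PrintWriter;

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.util.Version;

public class SearchitDriver {
    public static void main(String[] args) throws Exception {
        // Hypothetical index location and output file.
        IndexReader reader = DirectoryReader.open(FSDirectory.open(new File("index")));
        IndexSearcher searcher = new IndexSearcher(reader);
        PrintWriter writer = new PrintWriter(new File("results.csv"), "UTF-8");
        eval_index_reader.Searchit(reader, searcher, new StandardAnalyzer(Version.LUCENE_47),
                "text", new String[] { "coffee", "\"eggs vegetable\"" }, 10, writer);
        writer.close();
        reader.close();
    }
}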

From source file:ci6226.loadIndex.java

/**
 * This demonstrates a typical paging search scenario, where the search
 * engine presents pages of size n to the user. The user can then go to the
 * next page if interested in the next hits.
 *
 * When the query is executed for the first time, only enough results are
 * collected to fill 5 result pages. If the user wants to page beyond this
 * limit, the query is executed again and all hits are collected.
 *
 */
public static void doPagingSearch(BufferedReader in, IndexSearcher searcher, Query query, int hitsPerPage,
        boolean raw, boolean interactive, Analyzer analyzer) throws IOException, InvalidTokenOffsetsException {

    // Collect enough docs to show 5 pages
    TopDocs results = searcher.search(query, 5 * hitsPerPage);
    ScoreDoc[] hits = results.scoreDocs;

    int numTotalHits = results.totalHits;
    System.out.println(numTotalHits + " total matching documents");

    int start = 0;
    int end = Math.min(numTotalHits, hitsPerPage);

    while (true) {
        if (end > hits.length) {
            System.out.println("Only results 1 - " + hits.length + " of " + numTotalHits
                    + " total matching documents collected.");
            System.out.println("Collect more (y/n) ?");
            String line = in.readLine();
            if (line.length() == 0 || line.charAt(0) == 'n') {
                break;
            }

            hits = searcher.search(query, numTotalHits).scoreDocs;
        }

        end = Math.min(hits.length, start + hitsPerPage);

        for (int i = start; i < end; i++) {
            if (raw) { // output raw format
                System.out.println("doc=" + hits[i].doc + " score=" + hits[i].score);
                continue;
            }

            Document doc = searcher.doc(hits[i].doc);
            String path = doc.get("review_id");
            if (path != null) {
                System.out.println(ANSI_BLUE + (i + 1) + ANSI_RESET + "\nScore=\t" + hits[i].score);
                String title = doc.get("business_id");
                if (title != null) {

                    String text = doc.get("text");
                    TokenStream tokenStream = TokenSources.getAnyTokenStream(searcher.getIndexReader(),
                            hits[i].doc, "text", doc, analyzer);//TokenSources.getAnyTokenStream(searcher.getIndexReader() ,"text", analyzer);
                    SimpleHTMLFormatter htmlFormatter = new SimpleHTMLFormatter(ANSI_RED, ANSI_RESET);
                    // SimpleFragmenter fragmenter = new SimpleFragmenter(80);
                    Highlighter highlighter = new Highlighter(htmlFormatter, new QueryScorer(query));
                    TextFragment[] frag = highlighter.getBestTextFragments(tokenStream, text, false, 4);
                    System.out.print("Snippet=\t");
                    for (int j = 0; j < frag.length; j++) {
                        if ((frag[j] != null) && (frag[j].getScore() > 0)) {
                            System.out.println((frag[j].toString()));
                        }
                    }
                    //System.out.print("\n");
                    System.out.println("Full Review=\t" + doc.get("text") + "\nBusinessID=\t" + title);
                }
            } else {
                System.out.println((i + 1) + ". " + "No path for this document");
            }

        }

        if (!interactive || end == 0) {
            break;
        }

        if (numTotalHits >= end) {
            boolean quit = false;
            while (true) {

                System.out.print("Press ");
                if (start - hitsPerPage >= 0) {
                    System.out.print("(p)revious page, ");
                }
                if (start + hitsPerPage < numTotalHits) {
                    System.out.print("(n)ext page, ");
                }
                System.out.println("(q)uit or enter number to jump to a page.");
                int cpage = start / hitsPerPage;
                System.out.println(String.format("Current page=%d,max page=%d", cpage + 1,
                        1 + numTotalHits / hitsPerPage));
                String line = in.readLine();
                if (line.length() == 0 || line.charAt(0) == 'q') {
                    quit = true;
                    break;
                }
                if (line.charAt(0) == 'p') {
                    start = Math.max(0, start - hitsPerPage);
                    break;
                } else if (line.charAt(0) == 'n') {
                    if (start + hitsPerPage < numTotalHits) {
                        start += hitsPerPage;
                    }
                    break;
                } else {
                    int page = Integer.parseInt(line);
                    if ((page - 1) * hitsPerPage < numTotalHits) {
                        start = (page - 1) * hitsPerPage;
                        break;
                    } else {
                        System.out.println("No such page");
                    }
                }
            }
            if (quit) {
                break;
            }
            end = Math.min(numTotalHits, start + hitsPerPage);
        }
    }

}
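
doPagingSearch expects an already-parsed Query plus a BufferedReader for the interactive prompts. A hypothetical call site (the index path, field name, query string, and Version constant are assumptions; it would live in the same ci6226 package) could be:

import java.io.BufferedReader;
import java.io.File;
import java.io.InputStreamReader;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.queryparser.classic.QueryParser;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.util.Version;

public class PagingSearchDriver {
    public static void main(String[] args) throws Exception {
        BufferedReader in = new BufferedReader(new InputStreamReader(System.in, "UTF-8"));
        IndexReader reader = DirectoryReader.open(FSDirectory.open(new File("index")));
        IndexSearcher searcher = new IndexSearcher(reader);
        Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_47);
        Query query = new QueryParser(Version.LUCENE_47, "text", analyzer).parse("coffee");
        // hitsPerPage=10, raw=false, interactive=true
        loadIndex.doPagingSearch(in, searcher, query, 10, false, true, analyzer);
        reader.close();
    }
}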

From source file:cn.codepub.redis.directory.Main.java

License:Apache License

public static void testRedisDirectoryWithShardedJedisPool() throws IOException {
    long start = System.currentTimeMillis();
    IndexWriterConfig indexWriterConfig = new IndexWriterConfig(new WhitespaceAnalyzer())
            .setOpenMode(IndexWriterConfig.OpenMode.CREATE);
    //indexWriterConfig.setInfoStream(System.out);
    //indexWriterConfig.setRAMBufferSizeMB(2048);
    //LogByteSizeMergePolicy logByteSizeMergePolicy = new LogByteSizeMergePolicy();
    //logByteSizeMergePolicy.setMinMergeMB(1);
    //logByteSizeMergePolicy.setMaxMergeMB(64);
    //logByteSizeMergePolicy.setMaxCFSSegmentSizeMB(64);
    //indexWriterConfig.setRAMBufferSizeMB(1024).setMergePolicy(logByteSizeMergePolicy).setUseCompoundFile(false);
    //GenericObjectPoolConfig genericObjectPoolConfig = new GenericObjectPoolConfig();
    //genericObjectPoolConfig.setMaxWaitMillis(3000);
    //10s
    List<JedisShardInfo> shards = new ArrayList<>();
    JedisShardInfo si = new JedisShardInfo("localhost", 6379, Constants.TIME_OUT);
    //JedisShardInfo si2 = new JedisShardInfo("localhost", 6380);
    shards.add(si);
    //shards.add(si2);
    JedisPoolConfig jedisPoolConfig = new JedisPoolConfig();
    ShardedJedisPool shardedJedisPool = new ShardedJedisPool(jedisPoolConfig, shards);
    RedisDirectory redisDirectory = new RedisDirectory(new ShardedJedisPoolStream(shardedJedisPool));
    IndexWriter indexWriter = new IndexWriter(redisDirectory, indexWriterConfig);
    for (int i = 0; i < 10000000; i++) {
        indexWriter.addDocument(addDocument(i));
    }
    indexWriter.commit();
    indexWriter.close();
    redisDirectory.close();
    long end = System.currentTimeMillis();
    log.error("RedisDirectoryWithShardedJedisPool consumes {}s!", (end - start) / 1000);
    shardedJedisPool = new ShardedJedisPool(jedisPoolConfig, shards);
    start = System.currentTimeMillis();
    IndexSearcher indexSearcher = new IndexSearcher(
            DirectoryReader.open(new RedisDirectory(new ShardedJedisPoolStream(shardedJedisPool))));
    int total = 0;
    for (int i = 0; i < 10000000; i++) {
        TermQuery key1 = new TermQuery(new Term("key1", "key" + i));
        TopDocs search = indexSearcher.search(key1, 10);
        total += search.totalHits;
    }
    System.out.println(total);
    end = System.currentTimeMillis();
    log.error("RedisDirectoryWithShardedJedisPool search consumes {}ms!", (end - start));
}
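
Note that the search phase above never releases its resources: the reader wrapped by indexSearcher, its underlying RedisDirectory, and the ShardedJedisPool all stay open when the method returns. A minimal cleanup sketch, assuming Jedis 2.x where the pool implements Closeable, would end the method with:

    indexSearcher.getIndexReader().close(); // close the DirectoryReader over the RedisDirectory
    shardedJedisPool.close();               // release the pooled Redis connections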

From source file:cn.fql.blogspider.SearchMain.java

License:Open Source License

public static void doPagingSearch(BufferedReader in, IndexSearcher searcher, Query query, int hitsPerPage,
        boolean interactive) throws IOException {
    TopDocs results = searcher.search(query, 5 * hitsPerPage);
    ScoreDoc[] hits = results.scoreDocs;

    int numTotalHits = results.totalHits;
    System.out.println(numTotalHits + " total matching documents");

    int start = 0;
    int end = Math.min(numTotalHits, hitsPerPage);
    while (true) {
        do {
            if (end > hits.length) {
                System.out.println("Only results 1 - " + hits.length + " of " + numTotalHits
                        + " total matching documents collected.");
                System.out.println("Collect more (y/n) ?");
                String line = in.readLine();
                if (line.length() == 0)
                    return;
                if (line.charAt(0) == 'n') {
                    return;
                }

                hits = searcher.search(query, numTotalHits).scoreDocs;
            }

            end = Math.min(hits.length, start + hitsPerPage);

            for (int i = start; i < end; ++i) {

                Document doc = searcher.doc(hits[i].doc);
                String path = doc.get("path");
                //String modifyDate=doc.get("modified");
                System.out.println(
                        "doc=" + hits[i].doc + " score=" + hits[i].score + "------" + (i + 1) + ". " + path);
            }

            if (!(interactive))
                return;
            if (end == 0)
                return;
        } while (numTotalHits < end);
        boolean quit = false;
        while (true) {
            System.out.print("Press ");
            if (start - hitsPerPage >= 0)
                System.out.print("(p)revious page, ");

            if (start + hitsPerPage < numTotalHits)
                System.out.print("(n)ext page, ");

            System.out.println("(q)uit or enter number to jump to a page.");

            String line = in.readLine();
            if ((line.length() == 0) || (line.charAt(0) == 'q')) {
                quit = true;
                break;
            }
            if (line.charAt(0) == 'p') {
                start = Math.max(0, start - hitsPerPage);
                break;
            }
            if (line.charAt(0) == 'n') {
                if (start + hitsPerPage >= numTotalHits)
                    break;
                start += hitsPerPage;
                break;
            }

            int page = Integer.parseInt(line);
            if ((page - 1) * hitsPerPage < numTotalHits) {
                start = (page - 1) * hitsPerPage;
                break;
            }
            System.out.println("No such page");
        }

        if (quit)
            return;
        end = Math.min(numTotalHits, start + hitsPerPage);
    }
}

From source file:cn.jcenterhome.web.action.CpAction.java

private List<String> getKeyWord(String text) throws IOException {
    List<String> keywords = new ArrayList<String>();
    if (!Common.empty(text)) {
        Map<String, Integer> words = new HashMap<String, Integer>();
        Analyzer analyzer = new IKAnalyzer(true);
        StringReader reader = new StringReader(text);
        TokenStream tokenStream = analyzer.tokenStream("*", reader);
        TermAttribute termAtt = (TermAttribute) tokenStream.getAttribute(TermAttribute.class);
        while (tokenStream.incrementToken()) {
            String word = termAtt.term();
            if (word.length() > 1 && Common.strlen(word) > 2) {
                Integer count = words.get(word);
                if (count == null) {
                    count = 0;
                }
                words.put(word, count + 1);
            }
        }
        if (words.size() > 0) {
            Directory dir = null;
            IndexSearcher searcher = null;
            try {
                String fieldName = "text";
                dir = new RAMDirectory();
                IndexWriter writer = new IndexWriter(dir, analyzer, true, IndexWriter.MaxFieldLength.LIMITED);
                Document doc = new Document();
                doc.add(new Field(fieldName, text, Field.Store.YES, Field.Index.ANALYZED));
                writer.addDocument(doc);
                writer.close();
                searcher = new IndexSearcher(dir);
                searcher.setSimilarity(new IKSimilarity());
                Set<String> keys = words.keySet();
                Map<String, Float> temps = new HashMap<String, Float>();
                for (String key : keys) {
                    int count = words.get(key);
                    Query query = IKQueryParser.parse(fieldName, key);
                    TopDocs topDocs = searcher.search(query, 1);
                    if (topDocs.totalHits > 0) {
                        temps.put(key, topDocs.getMaxScore() * count);
                    }
                }
                Entry<String, Float>[] keywordEntry = getSortedHashtableByValue(temps);
                for (Entry<String, Float> entry : keywordEntry) {
                    if (keywords.size() < 5) {
                        keywords.add(entry.getKey());
                    }
                }
            } catch (Exception e) {
                e.printStackTrace();
            } finally {
                // Guard against NullPointerException if an exception was thrown
                // before searcher or dir was assigned.
                if (searcher != null) {
                    try {
                        searcher.close();
                    } catch (IOException e) {
                        e.printStackTrace();
                    }
                }
                if (dir != null) {
                    try {
                        dir.close();
                    } catch (IOException e) {
                        e.printStackTrace();
                    }
                }
            }
        }
    }
    return keywords;
}

From source file:collene.Freedb.java

License:Apache License

public static void DoSearch(Directory directory) throws Exception {

    out.println("I think these are the files:");
    for (String s : directory.listAll()) {
        out.println(s);
    }

    IndexSearcher searcher = new IndexSearcher(DirectoryReader.open(directory));
    Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_4_9);
    QueryParser parser = new QueryParser(Version.LUCENE_4_9, "any", analyzer);
    for (int i = 0; i < 5; i++) {
        long searchStart = System.currentTimeMillis();
        Query query = parser.parse("morrissey");
        //Query query = parser.parse("Dance");
        TopDocs docs = searcher.search(query, 10);
        long searchEnd = System.currentTimeMillis();
        out.println(String.format("%s %d total hits in %d", directory.getClass().getSimpleName(),
                docs.totalHits, searchEnd - searchStart));
        long lookupStart = System.currentTimeMillis();
        for (ScoreDoc d : docs.scoreDocs) {
            Document doc = searcher.doc(d.doc);
            out.println(String.format("%d %.2f %d %s", d.doc, d.score, d.shardIndex,
                    doc.getField("any").stringValue()));
        }
        long lookupEnd = System.currentTimeMillis();
        out.println(String.format("Document lookup took %d ms for %d documents", lookupEnd - lookupStart,
                docs.scoreDocs.length));
    }

    directory.close();
}

From source file:collene.Freedb.java

License:Apache License

public static void BuildIndex(Directory directory) throws Exception {
    String freedbPath = "/Users/gdusbabek/Downloads/freedb-complete-20140701.tar.bz2";

    if (directory == null) {
        System.out.println("Need to specify: { memory | file | cassandra }. Did you misspell something?");
        System.exit(-1);
    }

    FreeDbReader reader = new FreeDbReader(new File(freedbPath), 50000);
    reader.start();

    long indexStart = System.currentTimeMillis();
    Collection<Document> documents = new ArrayList<Document>(BATCH_SIZE);
    Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_4_9);
    IndexWriterConfig config = new IndexWriterConfig(Version.LUCENE_4_9, analyzer);
    config.setOpenMode(IndexWriterConfig.OpenMode.CREATE);
    IndexWriter writer = new IndexWriter(directory, config);

    // stop after this many documents.
    final int maxDocuments = 400000; //Integer.MAX_VALUE;

    FreeDbEntry entry = reader.next();
    int count = 0;
    while (entry != null && count < maxDocuments) {
        Document doc = new Document();
        String any = entry.toString();
        doc.add(new Field("any", any, TextField.TYPE_STORED));
        doc.add(new Field("artist", entry.getArtist(), TextField.TYPE_NOT_STORED));
        doc.add(new Field("album", entry.getAlbum(), TextField.TYPE_NOT_STORED));
        doc.add(new Field("title", entry.getTitle(), TextField.TYPE_NOT_STORED));
        doc.add(new Field("genre", entry.getGenre(), TextField.TYPE_NOT_STORED));
        doc.add(new Field("year", entry.getYear(), TextField.TYPE_NOT_STORED));
        for (int i = 0; i < entry.getTrackCount(); i++) {
            doc.add(new Field("track", entry.getTrack(i), TextField.TYPE_STORED));
        }
        documents.add(doc);
        if (VERBOSE) {
            out.println(any);
        }

        if (documents.size() == BATCH_SIZE) {
            //out.println(String.format("Adding batch at count %d", count));
            writer.addDocuments(documents);
            //out.println("done");
            documents.clear();
        }

        count += 1;
        if (count >= MAX_ENTRIES) {
            // done indexing.
            break;
        }
        entry = reader.next();

        if (count % 100000 == 0) {
            out.println(String.format("Indexed %d documents", count));

            // do a quick morrissey search for fun.
            //                IndexSearcher searcher = new IndexSearcher(DirectoryReader.open(ColDirectory.open(
            //                                new CassandraIO(8192, "collene", "cindex").start("127.0.0.1:9042"),
            //                                new CassandraIO(8192, "collene", "cmeta").start("127.0.0.1:9042"),
            //                                new CassandraIO(8192, "collene", "clock").start("127.0.0.1:9042")
            //                )));
            IndexSearcher searcher = new IndexSearcher(DirectoryReader.open(writer, false));
            QueryParser parser = new QueryParser(Version.LUCENE_4_9, "any", analyzer);
            long searchStart = System.currentTimeMillis();
            Query query = parser.parse("morrissey");
            TopDocs docs = searcher.search(query, 10);
            long searchEnd = System.currentTimeMillis();
            out.println(String.format("%s %d total hits in %d", directory.getClass().getSimpleName(),
                    docs.totalHits, searchEnd - searchStart));
            for (ScoreDoc d : docs.scoreDocs) {
                out.println(String.format("%d %.2f %d", d.doc, d.score, d.shardIndex));
            }
        }
    }

    if (documents.size() > 0) {
        out.println(String.format("Adding batch at count %d", count));
        writer.addDocuments(documents);
        out.println("done");
        documents.clear();

        // do a quick morrissey search for fun.
        IndexSearcher searcher = new IndexSearcher(DirectoryReader.open(writer, false));
        QueryParser parser = new QueryParser(Version.LUCENE_4_9, "any", analyzer);
        long searchStart = System.currentTimeMillis();
        Query query = parser.parse("morrissey");
        TopDocs docs = searcher.search(query, 10);
        long searchEnd = System.currentTimeMillis();
        out.println(String.format("%s %d total hits in %d", directory.getClass().getSimpleName(),
                docs.totalHits, searchEnd - searchStart));
        for (ScoreDoc d : docs.scoreDocs) {
            out.println(String.format("%d %.2f %d", d.doc, d.score, d.shardIndex));
        }
    }

    long indexTime = System.currentTimeMillis() - indexStart;
    out.println(String.format("Indexed %d things in %d ms (%s)", count, indexTime, directory.toString()));

    //        long startMerge = System.currentTimeMillis();
    //        writer.forceMerge(1, true);
    //        long endMerge = System.currentTimeMillis();
    //        out.println(String.format("merge took %d ms", endMerge-startMerge));
    out.println("I think these are the files:");
    for (String s : directory.listAll()) {
        out.println(s);
    }

    writer.close(true);
    directory.close();
}

From source file:collene.TestIndexing.java

License:Apache License

@Test
public void test() throws IOException, ParseException {
    Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_4_9);

    // write it out.
    IndexWriterConfig config = new IndexWriterConfig(Version.LUCENE_4_9, analyzer);
    config.setOpenMode(IndexWriterConfig.OpenMode.CREATE_OR_APPEND);
    IndexWriter writer = new IndexWriter(directory, config);

    for (int i = 0; i < 100; i++) {
        Collection<Document> documents = new ArrayList<Document>();
        Document doc = new Document();
        doc.add(new Field("key", "aaa_" + i, TextField.TYPE_STORED));
        doc.add(new Field("not", "notaaa", TextField.TYPE_NOT_STORED));
        doc.add(new Field("meta", "aaa_meta_aaa_" + i, TextField.TYPE_STORED));
        documents.add(doc);

        writer.addDocuments(documents);

        writer.commit();
        writer.forceMerge(1);
        writer.forceMergeDeletes(true);
    }

    // now read it back.
    IndexSearcher searcher = new IndexSearcher(DirectoryReader.open(writer, false));
    QueryParser parser = new QueryParser(Version.LUCENE_4_9, "key", analyzer);

    Query query = parser.parse("aaa_4");
    TopDocs docs = searcher.search(query, 1);
    int idToDelete = docs.scoreDocs[0].doc;
    Assert.assertTrue(docs.totalHits > 0);

    query = parser.parse("fersoius");
    docs = searcher.search(query, 1);
    Assert.assertFalse(docs.totalHits > 0);

    // delete that document.
    DirectoryReader reader = DirectoryReader.open(writer, true);
    writer.tryDeleteDocument(reader, idToDelete);

    reader.close();
    writer.close();

    // list files
    Set<String> files = new HashSet<String>();
    System.out.println("Listing files for " + directory.toString());
    for (String file : directory.listAll()) {
        files.add(file);
        System.out.println(" " + file);
    }

    if (strictFileChecking) {
        System.out.println("String file checking...");
        Sets.SetView<String> difference = Sets.difference(expectedFiles, files);
        Assert.assertEquals(Joiner.on(",").join(difference), 0, difference.size());
    }

    reader = DirectoryReader.open(directory);
    searcher = new IndexSearcher(reader);
    query = parser.parse("aaa_4");
    docs = searcher.search(query, 1);
    reader.close();
    Assert.assertFalse(docs.totalHits > 0);

    directory.close();
}

From source file:collene.TestLuceneAssumptions.java

License:Apache License

@Test
public void testCanSeeUpdatesAfterAdd() throws Exception {
    // this verifies that any reader can see updates after documents are added.
    File fdir = TestUtil.getRandomTempDir();
    pleaseDelete.add(fdir);

    Directory dir = FSDirectory.open(fdir);
    Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_4_9);
    IndexWriterConfig config = new IndexWriterConfig(Version.LUCENE_4_9, analyzer);
    config.setOpenMode(IndexWriterConfig.OpenMode.CREATE_OR_APPEND);
    IndexWriter writer = new IndexWriter(dir, config);

    Document doc0 = new Document();
    Document doc1 = new Document();
    doc0.add(new Field("f0", "aaa", TextField.TYPE_STORED));
    doc1.add(new Field("f0", "bbb", TextField.TYPE_STORED));
    List<Document> docs = Lists.newArrayList(doc0, doc1);
    writer.addDocuments(docs, analyzer);

    IndexSearcher searcher = new IndexSearcher(DirectoryReader.open(writer, false));
    QueryParser parser = new QueryParser(Version.LUCENE_4_9, "f0", new StandardAnalyzer(Version.LUCENE_4_9));

    Query query = parser.parse("bbb");
    TopDocs topDocs = searcher.search(query, 10);

    Assert.assertEquals(1, topDocs.totalHits);
    Assert.assertEquals(1, topDocs.scoreDocs.length);

    writer.close();
    dir.close();
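}

Note: DirectoryReader.open(writer, applyAllDeletes), used by both tests above, opens a near-real-time reader directly from the IndexWriter without requiring a commit. Passing false, as this last test does, skips resolving buffered deletes, making the reopen cheaper at the cost of the reader possibly still seeing deleted documents.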
}