Example usage for org.apache.lucene.index MultiReader MultiReader

List of usage examples for org.apache.lucene.index MultiReader MultiReader

Introduction

On this page you can find example usage of the org.apache.lucene.index MultiReader MultiReader constructor.

Prototype

public MultiReader(IndexReader... subReaders) throws IOException 

Source Link

Document

Construct a MultiReader aggregating the named set of (sub)readers.

Usage

From source file:TestSearch.java

/**
 * Demo driver: opens the product index, runs a single query through the
 * optimizer, writes each hit's doc number to e:/result.txt and dumps hit
 * summaries to stdout.
 *
 * @param args unused
 * @throws Exception any index/IO failure is propagated to the JVM
 */
public static void main(String[] args) throws Exception {

    String path = "E:\\jar\\product-index-mongo\\product-index-mongo\\index";
    List<IndexReader> readers = new ArrayList<IndexReader>();
    IndexReader reader1 = IndexReader.open(path);
    readers.add(reader1);

    System.out.println("DOC2 TOTAL:\t" + reader1.maxDoc());

    MultiReader reader = new MultiReader(readers.toArray(new IndexReader[0]));
    IndexConfig config = new IndexConfig();
    config.initConfig("D:\\mainonecode\\mainonesearch\\mainone-index-product\\config\\index\\index.conf");
    QueryFilters.loadFilters("D:\\mainonecode\\mainonesearch\\mainone-index-product\\config\\index\\filter");
    IndexSearcher searcher = new IndexSearcher(reader, false, config, null, null);
    ScorePluginLoader.load(reader);
    ProductQueryOptimizer optimizer = new ProductQueryOptimizer(16, 0.05f);
    // BUGFIX: the writer is now closed in a finally block — the original only
    // flushed it, leaking the file handle. The unused stdin reader and the
    // unused QueryFilters.filter(...) result were removed along with the
    // commented-out debug code that referenced them.
    BufferedWriter writer = new BufferedWriter(new FileWriter("e:/result.txt"));
    try {
        ProductQueryParam queryParam = makeParam();

        long begin = System.currentTimeMillis();
        System.out.println("?\t");
        ProductHits hits = (ProductHits) optimizer.optimize(searcher, queryParam);
        long end = System.currentTimeMillis();
        System.out.println("?:\t" + (end - begin));

        System.out.println("?:\t" + hits.getTotal());
        System.out.println("?:\t" + hits.getGrpHits());

        // Per-industry cluster counts.
        ClusterBean[] trades = hits.getIndustryids();
        for (int i = 0; i < trades.length; i++) {
            System.out.println(trades[i].getId() + "\t" + trades[i].getNum());
        }
        // One output line per hit: doc number to file, summary to stdout.
        Hit[] docs = hits.getHits();
        for (int i = 0; i < docs.length; i++) {
            writer.write(docs[i].getIndexDocNo() + "\n");
            Properties prop = searcher.getDetailSummary(docs[i].getIndexDocNo(), "", 1, 0);
            BitVector haspic = (BitVector) MemoryFieldCache.get("haspic");
            System.out.println(docs[i].getIndexDocNo() + "\t" + docs[i].getScore() + "\t"
                    + prop.getProperty("title") + "\t" + prop.getProperty("keyword") + "haspic:\t"
                    + haspic.get(docs[i].getIndexDocNo()) + "\tindustryid:\t" + prop.getProperty("industryid")
                    + "\t[" + prop.getProperty("spec") + "\t" + prop.getProperty("unit") + "\t"
                    + prop.getProperty("price") + "\t" + prop.getProperty("mincount") + "]");
        }
    } finally {
        writer.close();
        // NOTE(review): closing the MultiReader is expected to close reader1
        // as well — confirm against the Lucene version in use.
        reader.close();
    }
}

From source file:com.flaptor.hounder.MultiIndex.java

License:Apache License

/**
 * Gets a reader for this MultiIndex. Internally, gets readers for its
 * children and aggregates them into a single {@link MultiReader}.
 *
 * @return a {@link MultiReader} that can read every one of this index's
 *         children, or {@code null} if the children have not been assigned
 *         yet (see the special case below)
 * @throws IllegalStateException if the index is closed
 */
@Override
public IndexReader getReader() {
    if (state == State.closed) {
        throw new IllegalStateException("Can't get reader: the index is closed.");
    }

    // Special case, that will happen when using super(File) and before
    // children is assigned. in this case we return null, but will have
    // to call getReader() after assigning children.
    // THIS IS A HORRIBLE HACK
    if (null == children) {
        logger.warn(
                "First creation of a MultiIndex. Will have to return a null reader until I know my children.");
        return null;
    }

    // Since Lucene 2.3.1 the MultiReader constructor no longer throws
    // IOException, so no try/catch is needed here. (The long-dead commented
    // try/catch flagged by the old TODO has been removed.)
    IndexReader[] readers = new IndexReader[children.size()];
    for (int i = 0; i < readers.length; i++) {
        readers[i] = children.get(i).getReader();
    }
    reader = new MultiReader(readers);
    return reader;
}

From source file:com.globalsight.ling.tm2.lucene.LuceneCache.java

License:Apache License

/**
 * For MultiReader/* ww  w  .j  a  v a 2  s .c o  m*/
 * 
 * @param tmIds
 * @param ireaderArray
 * @return
 */
/**
 * Returns the cached LuceneCache for the given TM ids, creating one (over a
 * {@link MultiReader} spanning all given readers) on a cache miss.
 *
 * @param tmIds        TM ids identifying the cache entry
 * @param ireaderArray readers to aggregate when a new entry must be built
 * @return the cached or newly created LuceneCache
 */
public static LuceneCache getLuceneCache(ArrayList<Long> tmIds, IndexReader[] ireaderArray) {
    String key = tmIdToKey(tmIds);

    // Single lookup instead of containsKey() + get(): one hash probe fewer.
    // NOTE(review): this is still a check-then-act sequence — if this method
    // can run concurrently, confirm that `cache` is a ConcurrentHashMap or
    // that callers synchronize, otherwise two threads may build the entry.
    LuceneCache cached = cache.get(key);
    if (cached != null) {
        return cached;
    }

    MultiReader indexReader = new MultiReader(ireaderArray);
    IndexSearcher iS = new IndexSearcher(indexReader);
    LuceneCache lc = new LuceneCache(key, indexReader, iS);
    lc.setMultiReader(true);

    // Register the entry as a close listener on the MultiReader and on every
    // underlying reader, so closing any of them can invalidate the cache.
    indexReader.addReaderClosedListener(lc);
    for (IndexReader irOri : ireaderArray) {
        irOri.addReaderClosedListener(lc);
    }

    cache.put(key, lc);

    return lc;
}

From source file:com.jaeksoft.searchlib.index.ReaderLocal.java

License:Open Source License

/**
 * Builds a local reader over the configured index. When the configuration
 * declares a multi-index, one IndexReader is opened per child index and all
 * of them are aggregated into a single {@link MultiReader}; otherwise a
 * single reader is opened on the given directory.
 *
 * @param indexConfig    index configuration (multi/single, index list, similarity)
 * @param indexDirectory directory holding the index to read
 * @throws IOException        if the directory is closed or a reader cannot be opened
 * @throws SearchLibException propagated from the superclass constructor
 */
ReaderLocal(IndexConfig indexConfig, IndexDirectory indexDirectory) throws IOException, SearchLibException {
    super(indexConfig);
    spellCheckCache = new SpellCheckCache(100);
    docSetHitsCache = new DocSetHitsCache(indexConfig);
    this.indexDirectory = indexDirectory;
    references = new AtomicInteger(0);
    // Take the initial reference on this reader (balanced by a later release).
    acquire();
    Directory directory = indexDirectory.getDirectory();
    if (directory == null)
        throw new IOException("The directory is closed");
    if (indexConfig.isMulti()) {
        // Multi-index: open one reader per configured child index.
        List<String> indexList = indexConfig.getIndexList();
        indexDirectories = new IndexDirectory[indexList.size()];
        indexReaders = new IndexReader[indexList.size()];
        int i = 0;
        for (String indexName : indexList) {
            IndexDirectory indexDir = new IndexDirectory(
                    new File(ClientCatalog.getClient(indexName).getDirectory(), "index"));
            indexDirectories[i] = indexDir;
            indexReaders[i++] = IndexReader.open(indexDir.getDirectory());
        }
        // NOTE(review): if one of the later open() calls throws, the readers
        // opened so far are leaked — confirm whether a caller cleans up.
        indexReader = new MultiReader(indexReaders);
    } else {
        indexReaders = null;
        indexDirectories = null;
        indexReader = IndexReader.open(directory);
    }
    indexSearcher = new IndexSearcher(indexReader);

    // Apply a custom similarity when the configuration provides one.
    Similarity similarity = indexConfig.getNewSimilarityInstance();
    if (similarity != null)
        indexSearcher.setSimilarity(similarity);
}

From source file:com.lucene.index.test.IKAnalyzerdemo.java

License:Apache License

/**
 * /*  www. ja  v a 2 s  . c  om*/
 * ???
 * @param args
 */
public static void main(String[] args) {
    //Lucene Document??
    String fieldName = "text";
    //
    String text1 = "oracle,?";
    String text2 = "?";
    String text3 = "?";

    //IKAnalyzer?
    Analyzer analyzer = new IKAnalyzer();

    Directory directory1 = null;
    Directory directory2 = null;
    IndexWriter iwriter1 = null;
    IndexWriter iwriter2 = null;
    IndexReader ireader1 = null;
    IndexReader ireader2 = null;
    IndexSearcher isearcher = null;
    try {
        //
        directory1 = new RAMDirectory();
        directory2 = new RAMDirectory();

        //?IndexWriterConfig

        IndexWriterConfig iwConfig1 = new IndexWriterConfig(analyzer);
        iwConfig1.setOpenMode(OpenMode.CREATE);

        IndexWriterConfig iwConfig2 = new IndexWriterConfig(analyzer);
        iwConfig2.setOpenMode(OpenMode.CREATE);
        iwriter1 = new IndexWriter(directory1, iwConfig1);
        iwriter2 = new IndexWriter(directory2, iwConfig2);

        //
        Document doc1 = new Document();
        doc1.add(new StringField("ID", "10000", Field.Store.YES));
        doc1.add(new TextField("text1", text1, Field.Store.YES));
        iwriter1.addDocument(doc1);

        Document doc2 = new Document();
        doc2.add(new StringField("ID", "10001", Field.Store.YES));
        doc2.add(new TextField("text2", text2, Field.Store.YES));
        iwriter2.addDocument(doc2);

        iwriter1.close();
        iwriter2.close();

        //?**********************************
        //?   
        ireader1 = DirectoryReader.open(directory1);
        ireader2 = DirectoryReader.open(directory2);

        IndexReader[] mreader = { ireader1, ireader2 };

        MultiReader multiReader = new MultiReader(mreader);

        isearcher = new IndexSearcher(multiReader);

        String keyword = "?";
        //QueryParser?Query
        String[] fields = { "text1", "text2" };

        Map<String, Float> boosts = new HashMap<String, Float>();
        boosts.put("text1", 5.0f);
        boosts.put("text2", 2.0f);
        /**MultiFieldQueryParser??? 
         * */
        MultiFieldQueryParser parser = new MultiFieldQueryParser(fields, analyzer, boosts);
        Query query = parser.parse(keyword);

        System.out.println("Query = " + query);

        //?5?
        TopDocs topDocs = isearcher.search(query, 5);
        System.out.println("" + topDocs.totalHits);
        //
        ScoreDoc[] scoreDocs = topDocs.scoreDocs;
        for (int i = 0; i < topDocs.totalHits; i++) {
            Document targetDoc = isearcher.doc(scoreDocs[i].doc);
            System.out.println("" + targetDoc.toString());
        }

    } catch (CorruptIndexException e) {
        e.printStackTrace();
    } catch (LockObtainFailedException e) {
        e.printStackTrace();
    } catch (IOException e) {
        e.printStackTrace();
    } catch (ParseException e) {
        e.printStackTrace();
    } finally {
        if (ireader1 != null) {
            try {
                ireader1.close();
                ireader2.close();
            } catch (IOException e) {
                e.printStackTrace();
            }
        }
        if (directory1 != null) {
            try {
                directory1.close();
                directory2.close();
            } catch (IOException e) {
                e.printStackTrace();
            }
        }
    }
}

From source file:com.mathworks.xzheng.tools.remote.SearchServer.java

License:Apache License

/**
 * Starts an RMI search server: opens one index per letter of ALPHABET,
 * binds a multi-index searcher and a parallel searcher into the local RMI
 * registry, then closes the directories.
 *
 * @param args expects exactly one argument: the base directory of the indexes
 * @throws Exception any startup failure is propagated to the JVM
 */
public static void main(String[] args) throws Exception {
    if (args.length != 1) {
        System.err.println("Usage: SearchServer <basedir>");
        System.exit(-1);
    }

    // One sub-index (directory + reader) per letter of ALPHABET.
    String basedir = args[0];
    Directory[] dirs = new Directory[ALPHABET.length()];
    IndexReader[] searchables = new IndexReader[ALPHABET.length()];
    for (int i = 0; i < ALPHABET.length(); i++) {
        dirs[i] = FSDirectory.open(new File(basedir, "" + ALPHABET.charAt(i)));
        searchables[i] = DirectoryReader.open(dirs[i]);
    }

    // RMI registry on the default port.
    LocateRegistry.createRegistry(1099);

    // Aggregate all sub-indexes behind a single MultiReader-backed searcher.
    IndexSearcher multiSearcher = new IndexSearcher(new MultiReader(searchables));

    ExecutorService pool = Executors.newFixedThreadPool(nThreads);

    // NOTE(review): wrapping an IndexSearcher in another IndexSearcher looks
    // wrong — no released Lucene version documents such a constructor — and
    // Naming.rebind normally expects a java.rmi.Remote. Confirm which Lucene
    // version and remote wrapper this was written against.
    IndexSearcher multiImpl =
            new IndexSearcher(multiSearcher);
    Naming.rebind("//localhost/LIA_Multi", multiImpl);

    // Legacy contrib API (removed in Lucene 4.x): parallel fan-out searcher
    // exposed over RMI via RemoteSearchable.
    Searcher parallelSearcher =
            new ParallelMultiSearcher(searchables);
    RemoteSearchable parallelImpl =
            new RemoteSearchable(parallelSearcher);
    Naming.rebind("//localhost/LIA_Parallel", parallelImpl);

    System.out.println("Server started");

    // NOTE(review): the directories are closed while the searchers are still
    // bound in the registry, and `pool` is never used or shut down — verify
    // this is intentional demo behavior.
    for (int i = 0; i < ALPHABET.length(); i++) {
        dirs[i].close();
    }
}

From source file:com.xiaomi.linden.hadoop.indexing.keyvalueformat.IntermediateForm.java

License:Apache License

/**
 * Folds another intermediate form into this one. Used by the index update
 * combiner; the input form carries a single-document RAM index and/or a
 * single delete term.
 *
 * @param form         the input intermediate form
 * @param facetsConfig facet configuration; when non-null the input form's
 *                     taxonomy is merged and its facet ordinals remapped
 * @throws IOException if merging the indexes fails
 */
public void process(IntermediateForm form, FacetsConfig facetsConfig) throws IOException {
    // Nothing to do when the input form carries no index or taxonomy data.
    boolean hasData = form.dir.ramBytesUsed() > 0 || form.taxoDir.ramBytesUsed() > 0;
    if (!hasData) {
        return;
    }
    if (writer == null) {
        createWriter();
    }

    if (facetsConfig == null) {
        // No facets: copy the RAM index over directly.
        writer.addIndexes(new Directory[] { form.dir });
    } else {
        // Merge the input taxonomy first; the resulting ordinal map is then
        // used to rewrite facet ordinals while the documents are copied over.
        DirectoryTaxonomyWriter.OrdinalMap ordMap = new DirectoryTaxonomyWriter.MemoryOrdinalMap();
        taxoWriter.addTaxonomy(form.taxoDir, ordMap);
        int[] ordinals = ordMap.getMap();
        DirectoryReader srcReader = DirectoryReader.open(form.dir);
        try {
            List<AtomicReaderContext> leaves = srcReader.leaves();
            AtomicReader[] remapped = new AtomicReader[leaves.size()];
            for (int leaf = 0; leaf < remapped.length; leaf++) {
                remapped[leaf] = new OrdinalMappingAtomicReader(leaves.get(leaf).reader(), ordinals,
                        facetsConfig);
            }
            writer.addIndexes(new MultiReader(remapped));
        } finally {
            srcReader.close();
        }
    }
    numDocs++;
}

From source file:com.xiaomi.linden.hadoop.indexing.reduce.ShardWriter.java

License:Apache License

/**
 * Process an intermediate form by carrying out, on the Lucene instance of
 * the shard, the deletes and the inserts (a ram index) in the form.
 * @param form  the intermediate form containing deletes and a ram index
 * @throws IOException/*from   ww w .j a va  2  s . c  om*/
 */
public void process(IntermediateForm form, FacetsConfig facetsConfig) throws IOException {
    if (facetsConfig != null) {
        DirectoryTaxonomyWriter.OrdinalMap map = new DirectoryTaxonomyWriter.MemoryOrdinalMap();
        // merge the taxonomies
        taxoWriter.addTaxonomy(form.getTaxoDirectory(), map);
        int ordinalMap[] = map.getMap();
        DirectoryReader reader = DirectoryReader.open(form.getDirectory());
        try {
            List<AtomicReaderContext> leaves = reader.leaves();
            int numReaders = leaves.size();
            AtomicReader wrappedLeaves[] = new AtomicReader[numReaders];
            for (int i = 0; i < numReaders; i++) {
                wrappedLeaves[i] = new OrdinalMappingAtomicReader(leaves.get(i).reader(), ordinalMap,
                        facetsConfig);
            }
            writer.addIndexes(new MultiReader(wrappedLeaves));
        } finally {
            reader.close();
        }
    } else {
        writer.addIndexes(new Directory[] { form.getDirectory() });
    }
    numForms++;
}

From source file:de.csw.linkgenerator.plugin.lucene.LucenePlugin.java

License:Open Source License

/**
 * Creates and submits a query to the Lucene engine.
 * /*from  w  ww  .j a va2  s.c  o  m*/
 * @param query The base query, using the query engine supported by Lucene.
 * @param sort A Lucene sort object, can contain one or more sort criterias. If <tt>null</tt>,
 *            sort by hit score.
 * @param virtualWikiNames Comma separated list of virtual wiki names to search in, may be
 *            <tt>null</tt> to search all virtual wikis.
 * @param languages Comma separated list of language codes to search in, may be <tt>null</tt>
 *            or empty to search all languages.
 * @param indexes List of Lucene indexes (searchers) to search.
 * @param context The context of the request.
 * @return The list of search results.
 * @throws IOException If the Lucene searchers encounter a problem reading the indexes.
 * @throws ParseException If the query is not valid.
 */
private SearchResults search(String query, Sort sort, String virtualWikiNames, String languages,
        IndexSearcher[] indexes, XWikiContext context)
        throws IOException, org.apache.lucene.queryparser.classic.ParseException {
    //        MultiSearcher searcher = new MultiSearcher(indexes);
    IndexReader[] readers = new IndexReader[indexes.length];
    for (int i = 0; i < readers.length; i++) {
        readers[i] = indexes[i].getIndexReader();
    }

    IndexSearcher searcher = new IndexSearcher(new MultiReader(readers));
    // Enhance the base query with wiki names and languages.
    Query q = buildQuery(query, virtualWikiNames, languages);

    TopDocsCollector<? extends ScoreDoc> topDocs;
    if (sort != null) {
        topDocs = TopFieldCollector.create(sort, MAX_RESULTS, true, true, false, false);
    } else {
        topDocs = TopScoreDocCollector.create(MAX_RESULTS, false);
    }

    // Perform the actual search
    searcher.search(q, topDocs);

    // Transform the raw Lucene search results into XWiki-aware results
    return new SearchResults(topDocs, searcher, new com.xpn.xwiki.api.XWiki(context.getWiki(), context),
            context);
}

From source file:net.javacoding.xsearch.search.searcher.DefaultSearcherProvider.java

/**
 * Lazily initializes the shared searcher. Opens the main index and the
 * temporary index when they exist; if both are present they are combined
 * through a {@link MultiReader} so one searcher covers both.
 *
 * @return the cached or newly created IndexReaderSearcher, or {@code null}
 *         when no index exists or opening one failed
 */
private Object initIndexReader() {
    // Return the cached searcher if it was already built.
    if (_irs != null)
        return _irs;
    IndexReader indexReader = null;
    IndexReader indexReader2 = null;
    try {
        // Main (persistent) index.
        if (mainDirectory != null && IndexReader.indexExists(mainDirectory)) {
            indexReader = IndexReader.open(mainDirectory);
        }
        // Temporary index.
        // NOTE(review): the existence check uses tempDirectoryFile but the
        // open uses tempDirectory — presumably both refer to the same
        // location; verify.
        if (tempDirectory != null && IndexReader.indexExists(tempDirectoryFile)) {
            indexReader2 = IndexReader.open(tempDirectory);
        }
        if (indexReader == null && indexReader2 == null) {
            // Neither index exists: _irs stays null.
        } else if (indexReader != null && indexReader2 == null) {
            _irs = new IndexReaderSearcher(indexReader);
        } else if (indexReader == null && indexReader2 != null) {
            _irs = new IndexReaderSearcher(indexReader2);
        } else {
            // Both exist: search them together through a MultiReader.
            IndexReader[] indexReaders = new IndexReader[2];
            indexReaders[0] = indexReader;
            indexReaders[1] = indexReader2;
            indexReader = new MultiReader(indexReaders);
            _irs = new IndexReaderSearcher(indexReader);
        }
    } catch (IOException e) {
        // NOTE(review): a reader opened before the failure is not closed on
        // this path — confirm whether it should be released here.
        logger.warn("Failed to create Searcher: " + e.toString());
        e.printStackTrace();
    }
    if (_irs != null) {
        _irs.setSearcherProvider(this);
    }
    return _irs;
}