Example usage for org.apache.lucene.index IndexWriter close

List of usage examples for org.apache.lucene.index IndexWriter close

Introduction

In this page you can find the example usage for org.apache.lucene.index IndexWriter close.

Prototype

@Override
public void close() throws IOException 

Source Link

Document

Closes all open resources and releases the write lock.

Usage

From source file:com.leavesfly.lia.searching.ScoreTest.java

License:Apache License

/**
 * Indexes each supplied field in its own single-field document, merges
 * segments, and closes the writer.
 *
 * @param fields fields to index, one document per field
 * @throws Exception if indexing or closing the writer fails
 */
private void indexSingleFieldDocs(Field[] fields) throws Exception {
    IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(),
            IndexWriter.MaxFieldLength.UNLIMITED);
    try {
        for (Field f : fields) {
            Document doc = new Document();
            doc.add(f);
            writer.addDocument(doc);
        }
        writer.optimize();
    } finally {
        // Always release the index write lock, even if indexing fails;
        // otherwise later tests cannot open a writer on this directory.
        writer.close();
    }
}

From source file:com.leavesfly.lia.tool.ChainedFilterTest.java

License:Apache License

/**
 * Builds a small RAM-backed index of MAX documents (sequential "key"
 * values, owners split half "bob" / half "sue", consecutive dates) and
 * prepares the shared searcher, query, and filters used by the tests.
 */
public void setUp() throws Exception {
    directory = new RAMDirectory();
    IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(),
            IndexWriter.MaxFieldLength.UNLIMITED);

    Calendar cal = Calendar.getInstance();
    cal.set(2009, 1, 1, 0, 0); // A

    try {
        for (int i = 0; i < MAX; i++) {
            Document doc = new Document();
            doc.add(new Field("key", "" + (i + 1), Field.Store.YES, Field.Index.NOT_ANALYZED));
            doc.add(new Field("owner", (i < MAX / 2) ? "bob" : "sue", Field.Store.YES, Field.Index.NOT_ANALYZED));
            doc.add(new Field("date", DateTools.timeToString(cal.getTimeInMillis(), DateTools.Resolution.DAY),
                    Field.Store.YES, Field.Index.NOT_ANALYZED));
            writer.addDocument(doc);

            cal.add(Calendar.DATE, 1);
        }
    } finally {
        // Release the write lock even if indexing fails, so the directory
        // remains usable by subsequent tests.
        writer.close();
    }

    searcher = new IndexSearcher(directory);

    // Query matching every document: owner is either "bob" or "sue".
    BooleanQuery bq = new BooleanQuery(); // B
    bq.add(new TermQuery(new Term("owner", "bob")), // B
            BooleanClause.Occur.SHOULD); // B
    bq.add(new TermQuery(new Term("owner", "sue")), // B
            BooleanClause.Occur.SHOULD); // B
    query = bq;

    // Date filter accepting everything before 2099 (i.e. all test docs).
    cal.set(2099, 1, 1, 0, 0);
    dateFilter = TermRangeFilter.Less("date", // C
            DateTools.timeToString( // C
                    cal.getTimeInMillis(), // C
                    DateTools.Resolution.DAY));// C

    bobFilter = new CachingWrapperFilter( // D
            new QueryWrapperFilter( // D
                    new TermQuery(new Term("owner", "bob")))); // D

    sueFilter = new CachingWrapperFilter( // E
            new QueryWrapperFilter( // E
                    new TermQuery(new Term("owner", "sue")))); // E
}

From source file:com.liferay.portal.util.LuceneIndexer.java

License:Open Source License

/**
 * Rebuilds the Lucene index for this company from scratch: deletes the
 * on-disk index directory, recreates an empty index, then asks every
 * active portlet's indexer to re-index its documents.
 */
public void reIndex() {
    _log.info("Re-indexing Lucene started");

    long start = System.currentTimeMillis();

    String luceneDir = LuceneUtil.getLuceneDir(_companyId);

    // Remove the old index files entirely before recreating the index.
    FileUtil.deltree(luceneDir);

    try {
        // Opening a writer with create=true initializes a fresh, empty
        // index; it is closed immediately since indexers open their own.
        IndexWriter writer = LuceneUtil.getWriter(_companyId, true);

        writer.close();
    } catch (IOException ioe) {
        Logger.error(LuceneIndexer.class, ioe.getMessage(), ioe);
    }

    String[] indexIds = new String[] { _companyId };

    try {
        Iterator itr = PortletManagerUtil.getPortlets(_companyId).iterator();

        while (itr.hasNext()) {
            Portlet portlet = (Portlet) itr.next();

            String className = portlet.getIndexerClass();

            // Only active portlets that declare an indexer participate.
            if (portlet.isActive() && className != null) {
                _log.debug("Re-indexing with " + className + " started");

                Indexer indexer = (Indexer) InstancePool.get(className);

                indexer.reIndex(indexIds);

                _log.debug("Re-indexing with " + className + " completed");
            }
        }
    } catch (Exception e) {
        Logger.error(LuceneIndexer.class, e.getMessage(), e);
    }

    long end = System.currentTimeMillis();

    _log.info("Re-indexing Lucene completed in " + ((end - start) / Time.SECOND) + " seconds");

    // Signal to callers polling this flag that re-indexing is done.
    _finished = true;
}

From source file:com.lin.studytest.lucene.IndexFiles.java

License:Apache License

/**
 * Index all text files under a directory.
 *
 * Indexes every document under the hard-coded {@code docsPath} into the
 * hard-coded {@code indexPath}, appending to any existing index, and
 * prints the elapsed time. Exits with status 1 if the document
 * directory is unreadable.
 *
 * @param args unused; paths are hard-coded for this test
 */
public static void main(String[] args) {
    String indexPath = "D:\\software\\lucene\\testdata\\indexpath";
    String docsPath = "D:\\software\\lucene\\testdata\\docpath";

    // false = CREATE_OR_APPEND: add to an existing index if present.
    boolean create = false;

    final Path docDir = Paths.get(docsPath);
    if (!Files.isReadable(docDir)) {
        System.out.println("Document directory '" + docDir.toAbsolutePath()
                + "' does not exist or is not readable, please check the path");
        System.exit(1);
    }

    Date start = new Date();
    try {
        System.out.println("Indexing to directory '" + indexPath + "'...");

        Directory dir = FSDirectory.open(Paths.get(indexPath));
        Analyzer analyzer = new SmartChineseAnalyzer();
        IndexWriterConfig iwc = new IndexWriterConfig(analyzer);

        if (create) {
            // Create a new index in the directory, removing any
            // previously indexed documents:
            iwc.setOpenMode(OpenMode.CREATE);
        } else {
            // Add new documents to an existing index:
            iwc.setOpenMode(OpenMode.CREATE_OR_APPEND);
        }

        // Optional: for better indexing performance, if you
        // are indexing many documents, increase the RAM
        // buffer.  But if you do this, increase the max heap
        // size to the JVM (eg add -Xmx512m or -Xmx1g):
        //
        // iwc.setRAMBufferSizeMB(256.0);

        // try-with-resources guarantees the writer (and its write lock)
        // is released even if indexDocs throws.
        try (IndexWriter writer = new IndexWriter(dir, iwc)) {
            indexDocs(writer, docDir);
        }

        Date end = new Date();
        System.out.println(end.getTime() - start.getTime() + " total milliseconds");

    } catch (IOException e) {
        System.out.println(" caught a " + e.getClass() + "\n with message: " + e.getMessage());
    }
}

From source file:com.liusoft.dlog4j.search.SearchProxy.java

License:Open Source License

/**
 * Adds a searchable object to its Lucene index: builds a document from
 * the object's keyword, stored, and indexed-only fields, writes it, and
 * merges segments. Synchronized because only one writer may hold the
 * index write lock at a time.
 *
 * @param doc the object to index; silently ignored if {@code null}
 * @throws Exception if building the document or writing to the index fails
 */
public static synchronized void add(SearchEnabled doc) throws Exception {
    if (doc == null)
        return;

    Document lucene_doc = new Document();

    // Keyword field identifying this object within its index.
    String key = getField(doc, doc.getKeywordField());
    lucene_doc.add(Keyword(doc.getKeywordField(), key));

    // Record the concrete class so search results can be mapped back
    // to the right object type.
    lucene_doc.add(Keyword(CLASSNAME_FIELD, doc.getClass().getName()));

    // Stored (retrievable) fields; null values are skipped.
    String[] storeFields = doc.getStoreFields();
    for (int i = 0; storeFields != null && i < storeFields.length; i++) {
        String propertyValue = getField(doc, storeFields[i]);
        if (propertyValue != null)
            lucene_doc.add(Keyword(storeFields[i], propertyValue));
    }
    // Indexed-only (unstored) full-text fields.
    String[] indexFields = doc.getIndexFields();
    for (int i = 0; indexFields != null && i < indexFields.length; i++) {
        String propertyValue = getField(doc, indexFields[i]);
        lucene_doc.add(UnStored(indexFields[i], propertyValue));
    }
    // Write the document and merge segments; always close the writer so
    // the index write lock is released.
    IndexWriter writer = getWriter(doc.name());
    try {
        writer.addDocument(lucene_doc);
        writer.optimize();
    } finally {
        try {
            writer.close();
        } catch (Exception e) {
            // Best effort: a close failure is logged rather than thrown
            // so it cannot mask an exception from addDocument/optimize.
            log.error("Error occur when closing IndexWriter", e);
        }
    }
}

From source file:com.lorelib.analyzer.sample.LuceneIndexAndSearchDemo.java

License:Apache License

/**
 * /*www  .j  a va2s .  c  om*/
 * ???
 * @param args
 */
public static void main(String[] args) {
    //Lucene Document??
    String fieldName = "text";
    //
    String text = "IK Analyzer???????";

    //IKAnalyzer?
    Analyzer analyzer = new IKAnalyzer(true);

    Directory directory = null;
    IndexWriter iwriter = null;
    IndexReader ireader = null;
    IndexSearcher isearcher = null;
    try {
        //
        directory = new RAMDirectory();

        //?IndexWriterConfig
        IndexWriterConfig iwConfig = new IndexWriterConfig(analyzer);
        iwConfig.setOpenMode(OpenMode.CREATE_OR_APPEND);
        iwriter = new IndexWriter(directory, iwConfig);
        //
        Document doc = new Document();
        doc.add(new StringField("ID", "10000", Field.Store.YES));
        doc.add(new TextField(fieldName, text, Field.Store.YES));
        iwriter.addDocument(doc);
        iwriter.close();

        //?**********************************
        //?
        ireader = DirectoryReader.open(directory);
        isearcher = new IndexSearcher(ireader);

        String keyword = "?";
        //QueryParser?Query
        QueryParser qp = new QueryParser(fieldName, analyzer);
        qp.setDefaultOperator(QueryParser.AND_OPERATOR);
        Query query = qp.parse(keyword);
        System.out.println("Query = " + query);

        //?5?
        TopDocs topDocs = isearcher.search(query, 5);
        System.out.println("" + topDocs.totalHits);
        //
        ScoreDoc[] scoreDocs = topDocs.scoreDocs;
        for (int i = 0; i < topDocs.totalHits; i++) {
            Document targetDoc = isearcher.doc(scoreDocs[i].doc);
            System.out.println("" + targetDoc.toString());
        }

    } catch (CorruptIndexException e) {
        e.printStackTrace();
    } catch (LockObtainFailedException e) {
        e.printStackTrace();
    } catch (IOException e) {
        e.printStackTrace();
    } catch (ParseException e) {
        e.printStackTrace();
    } finally {
        if (ireader != null) {
            try {
                ireader.close();
            } catch (IOException e) {
                e.printStackTrace();
            }
        }
        if (directory != null) {
            try {
                directory.close();
            } catch (IOException e) {
                e.printStackTrace();
            }
        }
    }
}

From source file:com.lucene.index.test.IKAnalyzerdemo.java

License:Apache License

/**
 * Demo: index one document into each of two RAM-backed indexes, then
 * search both at once through a MultiReader with per-field boosts via
 * MultiFieldQueryParser.
 *
 * @param args unused command-line arguments
 */
public static void main(String[] args) {
    // Sample texts for the two demo documents.
    // NOTE(review): the literals below appear mojibake-corrupted in the
    // original source; they are preserved byte-for-byte.
    String text1 = "oracle,?";
    String text2 = "?";

    Analyzer analyzer = new IKAnalyzer();

    Directory directory1 = null;
    Directory directory2 = null;
    IndexWriter iwriter1 = null;
    IndexWriter iwriter2 = null;
    IndexReader ireader1 = null;
    IndexReader ireader2 = null;
    IndexSearcher isearcher = null;
    try {
        directory1 = new RAMDirectory();
        directory2 = new RAMDirectory();

        IndexWriterConfig iwConfig1 = new IndexWriterConfig(analyzer);
        iwConfig1.setOpenMode(OpenMode.CREATE);

        IndexWriterConfig iwConfig2 = new IndexWriterConfig(analyzer);
        iwConfig2.setOpenMode(OpenMode.CREATE);
        iwriter1 = new IndexWriter(directory1, iwConfig1);
        iwriter2 = new IndexWriter(directory2, iwConfig2);

        // One document per index, in different fields.
        Document doc1 = new Document();
        doc1.add(new StringField("ID", "10000", Field.Store.YES));
        doc1.add(new TextField("text1", text1, Field.Store.YES));
        iwriter1.addDocument(doc1);

        Document doc2 = new Document();
        doc2.add(new StringField("ID", "10001", Field.Store.YES));
        doc2.add(new TextField("text2", text2, Field.Store.YES));
        iwriter2.addDocument(doc2);

        iwriter1.close();
        iwriter1 = null; // closed successfully
        iwriter2.close();
        iwriter2 = null; // closed successfully

        // Search phase: combine both indexes into one logical reader.
        ireader1 = DirectoryReader.open(directory1);
        ireader2 = DirectoryReader.open(directory2);

        IndexReader[] mreader = { ireader1, ireader2 };

        MultiReader multiReader = new MultiReader(mreader);

        isearcher = new IndexSearcher(multiReader);

        String keyword = "?";
        String[] fields = { "text1", "text2" };

        // Boost matches in text1 (5.0) above matches in text2 (2.0).
        Map<String, Float> boosts = new HashMap<String, Float>();
        boosts.put("text1", 5.0f);
        boosts.put("text2", 2.0f);
        MultiFieldQueryParser parser = new MultiFieldQueryParser(fields, analyzer, boosts);
        Query query = parser.parse(keyword);

        System.out.println("Query = " + query);

        // Fetch at most 5 hits.
        TopDocs topDocs = isearcher.search(query, 5);
        System.out.println("" + topDocs.totalHits);
        ScoreDoc[] scoreDocs = topDocs.scoreDocs;
        // Bound the loop by the hits actually returned; totalHits counts
        // ALL matches and would overrun scoreDocs past 5 hits.
        for (int i = 0; i < scoreDocs.length; i++) {
            Document targetDoc = isearcher.doc(scoreDocs[i].doc);
            System.out.println("" + targetDoc.toString());
        }

    } catch (CorruptIndexException e) {
        e.printStackTrace();
    } catch (LockObtainFailedException e) {
        e.printStackTrace();
    } catch (IOException e) {
        e.printStackTrace();
    } catch (ParseException e) {
        e.printStackTrace();
    } finally {
        // Each resource gets its own null check and try/catch so that a
        // null or a failure on one cannot prevent the others from closing
        // (the original closed ireader2/directory2 under ireader1's and
        // directory1's null checks).
        if (iwriter1 != null) {
            try {
                iwriter1.close();
            } catch (IOException e) {
                e.printStackTrace();
            }
        }
        if (iwriter2 != null) {
            try {
                iwriter2.close();
            } catch (IOException e) {
                e.printStackTrace();
            }
        }
        if (ireader1 != null) {
            try {
                ireader1.close();
            } catch (IOException e) {
                e.printStackTrace();
            }
        }
        if (ireader2 != null) {
            try {
                ireader2.close();
            } catch (IOException e) {
                e.printStackTrace();
            }
        }
        if (directory1 != null) {
            try {
                directory1.close();
            } catch (IOException e) {
                e.printStackTrace();
            }
        }
        if (directory2 != null) {
            try {
                directory2.close();
            } catch (IOException e) {
                e.printStackTrace();
            }
        }
    }
}

From source file:com.lucene.index.test.IKAnalyzerdemoMutilField.java

License:Apache License

/**
 * Demo: index three documents in the same field with IKAnalyzer, then
 * parse a keyword query and print the top matching documents.
 *
 * @param args unused command-line arguments
 */
public static void main(String[] args) {
    // Field name and sample texts for the demo documents.
    // NOTE(review): the literals below appear mojibake-corrupted in the
    // original source; they are preserved byte-for-byte.
    String fieldName = "text";
    String text1 = "oracle?";
    String text2 = "?";
    String text3 = "?";

    Analyzer analyzer = new IKAnalyzer();

    Directory directory = null;
    IndexWriter iwriter = null;
    IndexReader ireader = null;
    IndexSearcher isearcher = null;
    try {
        directory = new RAMDirectory();

        IndexWriterConfig iwConfig = new IndexWriterConfig(analyzer);
        iwConfig.setOpenMode(OpenMode.CREATE_OR_APPEND);
        iwriter = new IndexWriter(directory, iwConfig);

        // Three documents sharing the same text field (IDs duplicated
        // in the original demo; preserved as-is).
        Document doc1 = new Document();
        doc1.add(new StringField("ID", "10000", Field.Store.YES));
        doc1.add(new TextField(fieldName, text1, Field.Store.YES));
        iwriter.addDocument(doc1);

        Document doc2 = new Document();
        doc2.add(new StringField("ID", "10000", Field.Store.YES));
        doc2.add(new TextField(fieldName, text2, Field.Store.YES));
        iwriter.addDocument(doc2);

        Document doc3 = new Document();
        doc3.add(new StringField("ID", "10000", Field.Store.YES));
        doc3.add(new TextField(fieldName, text3, Field.Store.YES));
        iwriter.addDocument(doc3);
        iwriter.close();
        iwriter = null; // closed successfully; nothing left for finally

        // Search phase.
        ireader = DirectoryReader.open(directory);
        isearcher = new IndexSearcher(ireader);

        String keyword = "?";
        QueryParser qp = new QueryParser(fieldName, analyzer);
        qp.setDefaultOperator(QueryParser.AND_OPERATOR);
        Query query = qp.parse(keyword);
        System.out.println("Query = " + query);

        // Fetch at most 5 hits.
        TopDocs topDocs = isearcher.search(query, 5);
        System.out.println("" + topDocs.totalHits);
        ScoreDoc[] scoreDocs = topDocs.scoreDocs;
        // Iterate over the hits actually returned (scoreDocs.length <= 5),
        // not totalHits: totalHits counts ALL matches and overruns the
        // array when more than 5 documents match.
        for (int i = 0; i < scoreDocs.length; i++) {
            Document targetDoc = isearcher.doc(scoreDocs[i].doc);
            System.out.println("" + targetDoc.toString());
        }

    } catch (CorruptIndexException e) {
        e.printStackTrace();
    } catch (LockObtainFailedException e) {
        e.printStackTrace();
    } catch (IOException e) {
        e.printStackTrace();
    } catch (ParseException e) {
        e.printStackTrace();
    } finally {
        // Close the writer too if an exception left it open, so the
        // write lock is always released.
        if (iwriter != null) {
            try {
                iwriter.close();
            } catch (IOException e) {
                e.printStackTrace();
            }
        }
        if (ireader != null) {
            try {
                ireader.close();
            } catch (IOException e) {
                e.printStackTrace();
            }
        }
        if (directory != null) {
            try {
                directory.close();
            } catch (IOException e) {
                e.printStackTrace();
            }
        }
    }
}

From source file:com.lucene.index.test.IndexFiles.java

License:Apache License

/**
 * Index all text files under a directory.
 *
 * Parses {@code -index}, {@code -docs}, and {@code -update} flags,
 * indexes every document under the docs path, and prints the elapsed
 * time. Exits with status 1 on missing/unreadable docs path.
 *
 * @param args command-line flags: [-index INDEX_PATH] [-docs DOCS_PATH] [-update]
 */
public static void main(String[] args) {
    String usage = "java org.apache.lucene.demo.IndexFiles"
            + " [-index INDEX_PATH] [-docs DOCS_PATH] [-update]\n\n"
            + "This indexes the documents in DOCS_PATH, creating a Lucene index"
            + "in INDEX_PATH that can be searched with SearchFiles";
    String indexPath = "index";
    String docsPath = null;
    boolean create = true;
    // Parse command-line flags; -index and -docs consume a value.
    for (int i = 0; i < args.length; i++) {
        if ("-index".equals(args[i])) {
            indexPath = args[i + 1];
            i++;
        } else if ("-docs".equals(args[i])) {
            docsPath = args[i + 1];
            i++;
        } else if ("-update".equals(args[i])) {
            create = false;
        }
    }

    if (docsPath == null) {
        System.err.println("Usage: " + usage);
        System.exit(1);
    }

    final Path docDir = Paths.get(docsPath);
    if (!Files.isReadable(docDir)) {
        System.out.println("Document directory '" + docDir.toAbsolutePath()
                + "' does not exist or is not readable, please check the path");
        System.exit(1);
    }

    Date start = new Date();

    try {
        System.out.println("Indexing to directory '" + indexPath + "'...");

        Directory dir = FSDirectory.open(Paths.get(indexPath));
        Analyzer analyzer = new StandardAnalyzer();
        IndexWriterConfig iwc = new IndexWriterConfig(analyzer);

        if (create) {
            // Create a new index in the directory, removing any
            // previously indexed documents:
            iwc.setOpenMode(OpenMode.CREATE);
        } else {
            // Add new documents to an existing index:
            iwc.setOpenMode(OpenMode.CREATE_OR_APPEND);
        }

        // Optional: for better indexing performance, if you
        // are indexing many documents, increase the RAM
        // buffer.  But if you do this, increase the max heap
        // size to the JVM (eg add -Xmx512m or -Xmx1g):
        //
        // iwc.setRAMBufferSizeMB(256.0);

        // try-with-resources guarantees the writer (and its write lock)
        // is released even if indexDocs throws.
        try (IndexWriter writer = new IndexWriter(dir, iwc)) {
            indexDocs(writer, docDir);
        }

        Date end = new Date();

        System.out.println("");

        System.out.println(end.getTime() - start.getTime() + "millsecond");

    } catch (IOException e) {
        System.out.println(" caught a " + e.getClass() + "\n with message: " + e.getMessage());
    }
}

From source file:com.lucid.solr.sidecar.SidecarIndexReaderFactory.java

License:Apache License

/**
 * Builds a "sidecar" index aligned slot-for-slot with the main index so
 * the two can be read together as one parallel reader. For every doc
 * position in the main index it writes either the looked-up boost data
 * or a dummy placeholder, committing at each main-segment boundary so
 * segment sizes match. Falls back to wrapping (or returning) the main
 * reader on any failure.
 *
 * @param main    the main index reader to align against
 * @param source  searcher over the collection providing the boost data
 * @param rebuild if true, delete any existing sidecar index first
 * @return a sidecar-backed reader, or the main reader on failure
 */
DirectoryReader buildParallelReader(DirectoryReader main, SolrIndexSearcher source, boolean rebuild) {
    try {
        if (source == null) {
            throw new Exception("Source collection is missing.");
        }
        // create as a sibling path of the main index
        Directory d = main.directory();
        File primaryDir = null;
        if (d instanceof FSDirectory) {
            String path = ((FSDirectory) d).getDirectory().getPath();
            primaryDir = new File(path);
            sidecarIndex = new File(primaryDir.getParentFile(), sidecarIndexLocation);
        } else {
            // Non-FS main index: fall back to a unique temp-dir location.
            String secondaryPath = System.getProperty("java.io.tmpdir") + File.separator + sidecarIndexLocation
                    + "-" + System.currentTimeMillis();
            sidecarIndex = new File(secondaryPath);
        }
        // create a new tmp dir for the secondary indexes
        File secondaryIndex = new File(sidecarIndex, System.currentTimeMillis() + "-index");
        if (rebuild) {
            safeDelete(sidecarIndex);
        }
        // Sidecar carries every source field except "id".
        parallelFields.addAll(source.getFieldNames());
        parallelFields.remove("id");
        LOG.debug("building a new index");
        Directory dir = FSDirectory.open(secondaryIndex);
        if (IndexWriter.isLocked(dir)) {
            // try forcing unlock
            try {
                IndexWriter.unlock(dir);
            } catch (Exception e) {
                LOG.warn("Failed to unlock " + secondaryIndex);
            }
        }
        // Record the main index's per-segment doc counts; the sidecar
        // commits at these boundaries so its segments line up.
        int[] mergeTargets;
        AtomicReader[] subReaders = SidecarIndexReader.getSequentialSubReaders(main);
        if (subReaders == null || subReaders.length == 0) {
            mergeTargets = new int[] { main.maxDoc() };
        } else {
            mergeTargets = new int[subReaders.length];
            for (int i = 0; i < subReaders.length; i++) {
                mergeTargets[i] = subReaders[i].maxDoc();
            }
        }
        Version ver = currentCore.getLatestSchema().getDefaultLuceneMatchVersion();
        IndexWriterConfig cfg = new IndexWriterConfig(ver, currentCore.getLatestSchema().getAnalyzer());
        //cfg.setInfoStream(System.err);
        // Serial merging plus the custom merge policy preserve the
        // segment layout computed above.
        cfg.setMergeScheduler(new SerialMergeScheduler());
        cfg.setMergePolicy(new SidecarMergePolicy(mergeTargets, false));
        IndexWriter iw = new IndexWriter(dir, cfg);
        LOG.info("processing " + main.maxDoc() + " docs / " + main.numDeletedDocs() + " dels in main index");
        int boostedDocs = 0;
        Bits live = MultiFields.getLiveDocs(main);

        int targetPos = 0;
        int nextTarget = mergeTargets[targetPos];
        BytesRef idRef = new BytesRef();
        // Walk every doc slot of the main index, emitting exactly one
        // document (real or dummy) per slot so doc numbers stay aligned.
        for (int i = 0; i < main.maxDoc(); i++) {
            if (i == nextTarget) {
                // Segment boundary: commit so the sidecar segment closes
                // at the same doc count as the main segment.
                iw.commit();
                nextTarget = nextTarget + mergeTargets[++targetPos];
            }
            if (live != null && !live.get(i)) {
                addDummy(iw); // this is required to preserve doc numbers.
                continue;
            } else {
                DocumentStoredFieldVisitor visitor = new DocumentStoredFieldVisitor(docIdField);
                main.document(i, visitor);
                Document doc = visitor.getDocument();
                // get docId
                String id = doc.get(docIdField);
                if (id == null) {
                    LOG.debug("missing id, docNo=" + i);
                    addDummy(iw);
                    continue;
                } else {
                    // find the data, if any
                    doc = lookup(source, id, idRef, parallelFields);
                    if (doc == null) {
                        LOG.debug("missing boost data, docId=" + id);
                        addDummy(iw);
                        continue;
                    } else {
                        LOG.debug("adding boost data, docId=" + id + ", b=" + doc);
                        iw.addDocument(doc);
                        boostedDocs++;
                    }
                }
            }
        }
        iw.close();
        DirectoryReader other = DirectoryReader.open(dir);
        LOG.info("SidecarIndexReader with " + boostedDocs + " boosted documents.");
        SidecarIndexReader pr = createSidecarIndexReader(main, other, sourceCollection, secondaryIndex);
        return pr;
    } catch (Exception e) {
        // Any failure degrades gracefully to searching the main index alone.
        LOG.warn("Unable to build parallel index: " + e.toString(), e);
        LOG.warn("Proceeding with single main index.");
        try {
            return new SidecarIndexReader(this, main, null, SidecarIndexReader.getSequentialSubReaders(main),
                    sourceCollection, null);
        } catch (Exception e1) {
            LOG.warn("Unexpected exception, returning single main index", e1);
            return main;
        }
    }
}