Example usage for org.apache.lucene.index IndexWriter close

List of usage examples for org.apache.lucene.index IndexWriter close

Introduction

On this page you can find example usages of org.apache.lucene.index IndexWriter close.

Prototype

@Override
public void close() throws IOException 

Source Link

Document

Closes all open resources and releases the write lock.

Usage

From source file:com.edgenius.wiki.search.service.IndexServiceImpl.java

License:Open Source License

/**
 * /*ww  w.j  ava  2s . com*/
 */
public void rebuildCommentIndex() {
    //~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    // page comment
    final List<PageComment> comments = commentDAO.getObjects();
    IndexWriter commentWriter = null;
    if (comments != null) {
        commentLock.lock();
        try {
            commentTemplate.addDocument(new IndexCallback() {

                @Override
                public void addDocument(IndexWriter commentWriter) {

                    for (PageComment comment : comments) {
                        try {
                            commentWriter.addDocument(createCommentDocument(comment));
                        } catch (Exception e) {
                            log.error("Rebuild index failed on comment. Owner page title "
                                    + comment.getPage().getTitle(), e);
                        }
                    }
                }
            });
        } finally {
            try {
                if (commentWriter != null)
                    commentWriter.close();
            } catch (Exception e) {
                log.error("Close comment index failed ", e);
            }
            commentLock.unlock();

            log.info("Comment index is rebuilt");
        }
    }
}

From source file:com.edu.lucene.IndexFiles.java

License:Apache License

/**
 * Index all text files under a directory.
 * <p>
 * Usage: {@code index <root_directory>}. Writes the index to {@code INDEX_DIR}
 * and refuses to run if that directory already exists.
 *
 * @param args command-line arguments; {@code args[0]} is the directory to index
 */
public static void main(String[] args) {
    String usage = "index <root_directory>";
    if (args.length == 0) {
        System.err.println("Usage: " + usage);
        System.exit(1);
    }

    if (INDEX_DIR.exists()) {
        System.out.println("Cannot save index to '" + INDEX_DIR + "' directory, please delete it first");
        System.exit(1);
    }

    final File docDir = new File(args[0]);
    if (!docDir.exists() || !docDir.canRead()) {
        System.out.println("Document directory '" + docDir.getAbsolutePath()
                + "' does not exist or is not readable, please check the path");
        System.exit(1);
    }

    Date start = new Date();
    IndexWriter writer = null;
    try {
        writer = new IndexWriter(FSDirectory.open(INDEX_DIR),
                new StandardAnalyzer(Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED);
        System.out.println("Indexing to directory '" + INDEX_DIR + "'...");
        indexDocs(writer, docDir);
        System.out.println("Optimizing...");
        writer.optimize();
        writer.close();
        writer = null; // closed cleanly; nothing left for the finally block

        Date end = new Date();
        System.out.println(end.getTime() - start.getTime() + " total milliseconds");

    } catch (IOException e) {
        System.out.println(" caught a " + e.getClass() + "\n with message: " + e.getMessage());
    } finally {
        // Fix: if indexDocs()/optimize() threw, the writer was leaked and the
        // index write lock stayed held. Close it best-effort here.
        if (writer != null) {
            try {
                writer.close();
            } catch (IOException ignored) {
                // best effort — the original failure was already reported above
            }
        }
    }
}

From source file:com.ekinoks.lucene.introduction.demos.IndexFiles.java

License:Apache License

/**
 * Index all text files under a directory.
 * <p>
 * Usage: {@code java org.apache.lucene.demo.IndexFiles [-index INDEX_PATH]
 * [-docs DOCS_PATH] [-update]}. With {@code -update} the existing index is
 * appended to; otherwise a fresh index is created.
 *
 * @param args command-line arguments as described above
 */
public static void main(String[] args) {

    String usage = "java org.apache.lucene.demo.IndexFiles"
            + " [-index INDEX_PATH] [-docs DOCS_PATH] [-update]\n\n"
            // TODO: Change the link with every release (or: fill in some
            // less error-prone alternative here...)
            + "See http://lucene.apache.org/java/3_1/demo.html for details.";
    String indexPath = "index";
    String docsPath = null;
    boolean create = true;
    for (int i = 0; i < args.length; i++) {
        if ("-index".equals(args[i])) {
            indexPath = args[i + 1];
            i++;
        } else if ("-docs".equals(args[i])) {
            docsPath = args[i + 1];
            i++;
        } else if ("-update".equals(args[i])) {
            create = false;
        }
    }

    if (docsPath == null) {
        System.err.println("Usage: " + usage);
        System.exit(1);
    }

    final File docDir = new File(docsPath);
    if (!docDir.exists() || !docDir.canRead()) {
        System.out.println("Document directory '" + docDir.getAbsolutePath()
                + "' does not exist or is not readable, please check the path");
        System.exit(1);
    }

    Date start = new Date();
    IndexWriter writer = null;
    try {
        System.out.println("Indexing to directory '" + indexPath + "'...");

        Directory dir = FSDirectory.open(new File(indexPath));

        Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_31);
        IndexWriterConfig iwc = new IndexWriterConfig(Version.LUCENE_31, analyzer);

        if (create) {
            // Create a new index in the directory, removing any
            // previously indexed documents:
            iwc.setOpenMode(OpenMode.CREATE);
        } else {
            // Add new documents to an existing index:
            iwc.setOpenMode(OpenMode.CREATE_OR_APPEND);
        }

        // Optional: for better indexing performance, if you
        // are indexing many documents, increase the RAM
        // buffer. But if you do this, increase the max heap
        // size to the JVM (eg add -Xmx512m or -Xmx1g):
        //
        // iwc.setRAMBufferSizeMB(256.0);

        writer = new IndexWriter(dir, iwc);
        indexDocs(writer, docDir);

        // NOTE: if you want to maximize search performance,
        // you can optionally call optimize here. This can be
        // a costly operation, so generally it's only worth
        // it when your index is relatively static (ie you're
        // done adding documents to it):
        //
        // writer.optimize();

        writer.close();
        writer = null; // closed cleanly; nothing left for the finally block

        Date end = new Date();
        System.out.println(end.getTime() - start.getTime() + " total milliseconds");

    } catch (IOException e) {
        System.out.println(" caught a " + e.getClass() + "\n with message: " + e.getMessage());
    } finally {
        // Fix: if indexDocs() threw, the writer was leaked and the index
        // write lock stayed held. Close it best-effort here.
        if (writer != null) {
            try {
                writer.close();
            } catch (IOException ignored) {
                // best effort — the original failure was already reported above
            }
        }
    }
}

From source file:com.emental.mindraider.core.search.SearchCommander.java

License:Apache License

/**
 * Generates the full-text search index from scratch for the given directory.
 * <p>
 * Clears the tag ontology, indexes every document under {@code directory},
 * optimizes and closes the index, then (always) redraws and persists the tag
 * ontology. A no-op when {@code directory} is null or empty.
 *
 * @param directory the directory whose documents are indexed
 * @throws IOException the I/O exception
 */
public static void rebuild(String directory) throws IOException {
    if (StringUtils.isNotEmpty(directory)) {
        Date start = new Date();
        IndexWriter writer = null;
        try {
            // experiment in here with analyzers
            // SearchEngine searchEngine = new
            // SearchEngine(getSearchIndexPath());
            // searchEngine.rebuildIndex();
            writer = new IndexWriter(getSearchIndexPath(), new StandardAnalyzer(), true);

            // infiltrated tags - remove the tag information
            MindRaider.tagCustodian.clear();
            SearchCommander.indexDocs(writer, new File(directory));
            writer.optimize();
            writer.close();
            writer = null; // closed cleanly; nothing left for the finally block

            Date end = new Date();
            String finalMessage = "FTS index rebuilt in " + (end.getTime() - start.getTime()) + " milliseconds";
            StatusBar.setText(finalMessage);
        } catch (IOException e) {
            System.err.println(e.getClass() + ": " + e.getMessage());
        } finally {
            // Fix: if indexDocs()/optimize() threw, the writer was leaked and
            // the index write lock stayed held. Close it best-effort here.
            if (writer != null) {
                try {
                    writer.close();
                } catch (IOException closeFailure) {
                    logger.error("Unable to close FTS index writer!", closeFailure);
                }
            }
            try {
                MindRaider.tagCustodian.redraw();
                MindRaider.tagCustodian.toRdf();
            } catch (MindRaiderException e) {
                logger.error("Unable to save tag ontology!", e);
            }
        }
    }
}

From source file:com.emental.mindraider.core.search.SearchCommander.java

License:Apache License

/**
 * Updates the full-text index entry for a single concept: the old document is
 * deleted via its {@code uri} term and the fresh document is added.
 *
 * @param conceptFile   file containing the concept content
 * @param notebookLabel label of the owning notebook
 * @param conceptLabel  label of the concept
 * @param conceptUri    URI identifying the concept in the index
 */
public static void updateIndex(File conceptFile, String notebookLabel, String conceptLabel, String conceptUri) {
    // TODO the body of this method to by in asynchronous thread
    IndexWriter writer = null;
    try {
        writer = new IndexWriter(getSearchIndexPath(), new StandardAnalyzer(), false);
        // update document via concept URI
        logger.debug("UPDATing FTS index for concept: " + conceptFile + " # " + notebookLabel + " # "
                + conceptLabel + " # " + conceptUri); // {{debug}}
        Document document = FileDocument.Document(conceptFile, notebookLabel, conceptLabel, conceptUri);
        writer.deleteDocuments(new Term("uri", conceptUri));
        writer.addDocument(document);
        // TODO removed just for now (before it will be done in async)
        //writer.optimize();
    } catch (Exception e) {
        logger.debug("Unable to update FTS index", e); // {{debug}}
    } finally {
        // Fix (was a TODO): close the writer in finally so the index write
        // lock is released even when deletion/addition fails.
        if (writer != null) {
            try {
                writer.close();
            } catch (Exception e) {
                logger.debug("Unable to close FTS index writer", e); // {{debug}}
            }
        }
    }
}

From source file:com.epimorphics.server.indexers.LuceneIndex.java

License:Apache License

/**
 * Commits pending changes on the shared index writer.
 * <p>
 * If the commit fails, a best-effort close is attempted to save the data,
 * and the commit error is rethrown wrapped in an {@code EpiException}.
 *
 * @throws EpiException if the commit fails (wrapping the original cause)
 */
protected void doCommit() {
    IndexWriter writer = getIndexWriter();
    try {
        writer.commit();
    } catch (Exception e) {
        // try to save the data
        try {
            writer.close();
        } catch (IOException e1) {
            // Fix: log the close failure (e1), not the commit error (e) —
            // the commit error is propagated via EpiException below anyway.
            log.error("Failed to even close index writer after commit error", e1);
        }
        throw new EpiException(e);
    }
}

From source file:com.esri.gpt.catalog.lucene.LuceneIndexAdapter.java

License:Apache License

/**
 * Closes an index writer.
 * <p>
 * In single-writer mode the shared writer is only committed (kept open);
 * it is closed — and the shared reference dropped — only after an
 * {@link OutOfMemoryError}, whose aftermath makes the writer unusable.
 * In per-request mode the writer is closed outright; if that close fails
 * while a NativeFSLockFactory is active, the abandoned write lock is
 * forcibly released via reflection so the server need not be restarted.
 *
 * @param writer the writer to close.
 */
protected void closeWriter(IndexWriter writer) {

    if (this.useSingleWriter) {
        try {
            if (writer != null) {
                getLogger().finer("Committing Lucene IndexWriter...");
                writer.commit();
            }
        } catch (CorruptIndexException e) {
            getLogger().log(Level.SEVERE, "Error on IndexWriter.commit", e);
        } catch (IOException e) {
            getLogger().log(Level.SEVERE, "Error on IndexWriter.commit", e);
        } catch (OutOfMemoryError e) {
            getLogger().log(Level.SEVERE, "Error on IndexWriter.commit", e);
            try {
                writer.close();
            } catch (CorruptIndexException e1) {
                // Fix: was logging the commit error (e) under the commit
                // message; log the close failure (e1) itself.
                getLogger().log(Level.SEVERE, "Error on IndexWriter.close", e1);
            } catch (IOException e1) {
                getLogger().log(Level.SEVERE, "Error on IndexWriter.close", e1);
            } finally {
                SINGLE_WRITER = null;
            }
        }
        return;
    }

    try {
        if (writer != null) {
            getLogger().finer("Closing Lucene IndexWriter...");
            writer.close();
        }
    } catch (Throwable t) {
        getLogger().log(Level.WARNING, "IndexWriter failed to close.", t);

        // There are times when closing the IndexWriter fails (typically FileNotFound while flushing).
        // This is a bit of a disaster, if it happens we're unsure if the index is corrupted, it
        // also leaves the write lock unreleased, which basically is terminal when a 
        // NativeFSLockFactory is active (you need to stop/start the web server)
        // This is an attempt to forcibly release the lock.
        if ((writer != null) && this.luceneConfig.getUseNativeFSLockFactory()) {
            // Reach into the writer's private "writeLock" field by reflection.
            java.lang.reflect.Field wlFld = null;
            try {
                wlFld = writer.getClass().getDeclaredField("writeLock");
            } catch (Throwable t2) {
                // field not found (e.g. different Lucene version) — give up silently
            }
            if (wlFld != null) {
                boolean wasReleased = false;
                try {
                    wlFld.setAccessible(true);
                    Object wlObj = wlFld.get(writer);
                    if ((wlObj != null) && (wlObj instanceof Lock)) {
                        Lock wlLock = (Lock) wlObj;
                        wasReleased = !wlLock.isLocked();
                        if (!wasReleased) {
                            wlLock.release();
                            wasReleased = !wlLock.isLocked();
                        }
                    }
                } catch (Throwable t2) {
                    getLogger().log(Level.WARNING, "Unable to forcibly release an abandoned write lock.", t2);
                } finally {
                    String sMsg = "The IndexWriter failed to close, write-lock released: " + wasReleased;
                    getLogger().warning(sMsg);
                }
            }
        }

    }
}

From source file:com.esri.gpt.catalog.lucene.LuceneIndexAdapter.java

License:Apache License

/**
 * Fired when the servlet context is shutting down: marks the single writer
 * as destroyed, then best-effort closes the shared writer and searcher,
 * logging (never propagating) any failure.
 * @param context the application context
 */
public static synchronized void onContextDestroy(ApplicationContext context) {
    SINGLE_WRITER_WASDESTROYED = true;

    IndexWriter writer = SINGLE_WRITER;
    if (writer != null) {
        // Drop the shared reference first so nobody reuses a closing writer.
        SINGLE_WRITER = null;
        try {
            writer.close();
        } catch (Exception e) {
            LOGGER.log(Level.SEVERE, "Error while closing single IndexWriter on destroy.", e);
        }
    }

    if (REFERENCED_SEARCHER != null) {
        try {
            REFERENCED_SEARCHER.close();
        } catch (Exception e) {
            LOGGER.log(Level.SEVERE, "Error while closing single IndexReader on destroy.", e);
        }
    }

}

From source file:com.esri.gpt.catalog.lucene.LuceneIndexOptimizer.java

License:Apache License

/**
 * Run the optimization process.
 * <p>
 * Acquires the background-thread lock, optimizes the main Lucene index via a
 * fresh writer, then optimizes each enabled assertion index. Cooperatively
 * checks for interruption between phases and releases the writer, the
 * background lock, and the request context in {@code finally}.
 */
public void run() {
    LOGGER.info("Optimization run started...");
    RequestContext context = null;
    IndexWriter writer = null;
    Lock backgroundLock = null;
    long tStartMillis = System.currentTimeMillis();
    try {

        // initialize
        context = RequestContext.extract(null);
        LuceneIndexAdapter adapter = new LuceneIndexAdapter(context);
        adapter.touch(); // ensures that a proper directory structure exists
        if (this.checkInterrupted())
            return;

        // obtain the background thread lock,
        // sleep if busy then try again
        // NOTE(review): the original comment said "sleep for 10 minutes" but
        // 10 * 1000 ms is 10 seconds — confirm the intended duration.
        try {
            backgroundLock = adapter.obtainBackgroundLock();
        } catch (LockObtainFailedException lofe) {
            if (this.checkInterrupted())
                return;
            try {
                Thread.sleep(10 * 1000);
            } catch (InterruptedException e) {
                throw new IOException(e.toString());
            }
            if (this.checkInterrupted())
                return;
            backgroundLock = adapter.obtainBackgroundLock();
        }

        // optimize the index
        writer = adapter.newWriter();
        if (this.checkInterrupted())
            return;
        writer.optimize();
        adapter.closeWriter(writer);
        // null the local so the finally block doesn't close it twice
        writer = null;

        // log the summary message
        double dSec = (System.currentTimeMillis() - tStartMillis) / 1000.0;
        StringBuffer msg = new StringBuffer();
        msg.append("Optimization run completed.");
        msg.append(", runtime: ");
        msg.append(Math.round(dSec / 60.0 * 100.0) / 100.0).append(" minutes");
        if (dSec <= 600) {
            msg.append(", ").append(Math.round(dSec * 100.0) / 100.0).append(" seconds");
        }
        LOGGER.info(msg.toString());

    } catch (LockObtainFailedException e) {
        // another process/thread holds the lock — skip this run quietly
        LOGGER.log(Level.INFO, "Optimization run aborted, reason: " + e.getMessage());
    } catch (Throwable t) {
        LOGGER.log(Level.SEVERE, "Error optimizing index.", t);
    } finally {
        // best-effort cleanup: writer (only if not closed above), lock, context
        if (writer != null) {
            try {
                writer.close();
            } catch (Throwable t) {
                LOGGER.log(Level.SEVERE, "Error closing IndexWriter.", t);
            }
        }
        if (backgroundLock != null) {
            try {
                backgroundLock.release();
            } catch (Throwable t) {
                LOGGER.log(Level.WARNING, "Error releasing lock.", t);
            }
        }
        if (context != null) {
            context.onExecutionPhaseCompleted();
        }
        if (this.wasInterrupted) {
            LOGGER.info("LuceneIndexOptimizer run was interrupted.");
        }
    }

    // optimize the assertion indexes (each failure is logged per-index and
    // does not stop the remaining indexes from being optimized)
    AsnFactory asnFactory = AsnFactory.newFactory(null);
    AsnConfig asnConfig = asnFactory.getConfiguration();
    if (asnConfig.getAreAssertionsEnabled()) {
        AsnIndexReferences asnIndexRefs = asnConfig.getIndexReferences();
        if (asnIndexRefs != null) {
            for (AsnIndexReference asnIndexRef : asnIndexRefs.values()) {
                if ((asnIndexRef != null) && asnIndexRef.getEnabled()) {
                    String asnLoc = asnIndexRef.getIndexLocation();
                    LOGGER.fine("Optimizing assertion index: " + asnLoc);
                    try {
                        long asnStartMillis = System.currentTimeMillis();
                        AsnIndexAdapter asnIndexAdapter = asnIndexRef.makeIndexAdapter(null);
                        asnIndexAdapter.optimize();

                        double asnSec = (System.currentTimeMillis() - asnStartMillis) / 1000.0;
                        StringBuffer msg = new StringBuffer();
                        msg.append("Optimization of assertion index complete: " + asnLoc);
                        msg.append(", runtime: ");
                        msg.append(Math.round(asnSec / 60.0 * 100.0) / 100.0).append(" minutes");
                        if (asnSec <= 600) {
                            msg.append(", ").append(Math.round(asnSec * 100.0) / 100.0).append(" seconds");
                        }
                        LOGGER.fine(msg.toString());

                    } catch (Exception e) {
                        LOGGER.log(Level.SEVERE, "Error optimizing assertion index: " + asnLoc, e);
                    }
                }
            }
        }
    }

}

From source file:com.esri.gpt.server.assertion.index.AsnBaseIndexAdapter.java

License:Apache License

/**
 * Closes an index writer, logging (rather than propagating) any failure.
 * @param writer the writer to close; a null writer is silently ignored
 */
public void closeWriter(IndexWriter writer) {
    if (writer == null) {
        return;
    }
    try {
        writer.close();
    } catch (IOException e) {
        // CorruptIndexException is an IOException subtype and both were
        // logged identically, so a single catch covers both cases.
        LOGGER.log(Level.SEVERE, "IndexWriter failed to close.", e);
    }
}