Example usage for org.apache.lucene.index DirectoryReader indexExists

List of usage examples for org.apache.lucene.index DirectoryReader indexExists

Introduction

On this page you can find example usage for org.apache.lucene.index DirectoryReader indexExists.

Prototype

public static boolean indexExists(Directory directory) throws IOException 

Document

Returns true if an index likely exists at the specified directory.
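
A minimal sketch of the typical pattern (the index path and analyzer are illustrative placeholders, not taken from the examples below): call indexExists before opening an IndexWriter to decide between creating a fresh index and appending to an existing one.

import java.io.IOException;
import java.nio.file.Paths;

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;

public class IndexExistsExample {
    public static void main(String[] args) throws IOException {
        // hypothetical index location
        try (Directory dir = FSDirectory.open(Paths.get("/tmp/example-index"))) {
            // true if an index likely exists at this directory (the check is a heuristic)
            boolean exists = DirectoryReader.indexExists(dir);

            IndexWriterConfig cfg = new IndexWriterConfig(new StandardAnalyzer());
            // append to an existing index, otherwise create a fresh one
            cfg.setOpenMode(exists ? IndexWriterConfig.OpenMode.APPEND : IndexWriterConfig.OpenMode.CREATE);

            try (IndexWriter writer = new IndexWriter(dir, cfg)) {
                // ... add or update documents here ...
            }
        }
    }
}

Because the javadoc only promises that an index "likely" exists, most callers use the result to pick an open mode or to skip opening a DirectoryReader, rather than as a hard guarantee.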

Usage

From source file:de.jetsli.lumeo.RawLucene.java

License:Apache License

public RawLucene init() {
    indexLock();
    try {
        if (closed)
            throw new IllegalStateException("Already closed");

        if (writer != null)
            throw new IllegalStateException("Already initialized");

        // release locks when started
        if (IndexWriter.isLocked(dir)) {
            logger.warn("index is locked + " + name + " -> releasing lock");
            IndexWriter.unlock(dir);
        }
        IndexWriterConfig cfg = new IndexWriterConfig(VERSION, defaultMapping.getCombinedAnalyzer());
        LogByteSizeMergePolicy mp = new LogByteSizeMergePolicy();
        mp.setMaxMergeMB(getMaxMergeMB());
        cfg.setRAMBufferSizeMB(ramBufferSizeMB);
        cfg.setTermIndexInterval(termIndexIntervalSize);
        cfg.setMergePolicy(mp);

        // TODO specify different formats for id fields etc
        // -> this breaks 16 of our tests!? Lucene Bug?
        //            cfg.setCodec(new Lucene40Codec() {
        //
        //                @Override public PostingsFormat getPostingsFormatForField(String field) {
        //                    return new Pulsing40PostingsFormat();
        //                }
        //            });

        // cfg.setMaxThreadStates(8);
        boolean create = !DirectoryReader.indexExists(dir);
        cfg.setOpenMode(create ? IndexWriterConfig.OpenMode.CREATE : IndexWriterConfig.OpenMode.APPEND);

        //wrap the writer with a tracking index writer
        writer = new TrackingIndexWriter(new IndexWriter(dir, cfg));

        nrtManager = new NRTManager(writer, new SearcherFactory() {
            //              @Override
            //              public IndexSearcher newSearcher(IndexReader reader) throws IOException {
            //                //TODO do some kind of warming here?
            //                return new IndexSearcher(reader);
            //              }              
        });

        getCurrentRTCache(latestGen);
        int priority = Math.min(Thread.currentThread().getPriority() + 2, Thread.MAX_PRIORITY);
        flushThread = new FlushThread("flush-thread");
        flushThread.setPriority(priority);
        flushThread.setDaemon(true);
        flushThread.start();

        reopenThread = new NRTManagerReopenThread(nrtManager, ordinaryWaiting, incomingSearchesMaximumWaiting);
        reopenThread.setName("NRT Reopen Thread");
        reopenThread.setPriority(priority);
        reopenThread.setDaemon(true);
        reopenThread.start();
        return this;
    } catch (Exception e) {
        throw new RuntimeException(e);
    } finally {
        indexUnlock();
    }
}

From source file:de.twitterlivesearch.analysis.Searcher.java

License:Apache License

/**
 * This is the same as
 * {@link de.twitterlivesearch.analysis.Searcher#searchForTweets(String)
 * searchForTweets(String)}, but the search is limited to the tweet with the
 * given id. This can for example be used to analyze the latest incoming
 * tweet.
 *
 * @param id the id of the tweet the search is limited to; may be null
 * @param queryString the query to search for
 * @return the list of matching tweet documents
 */
public List<Document> searchForTweets(Integer id, String queryString) {
    if (queryString.isEmpty()) {
        return Collections.emptyList();
    }

    AbstractConfiguration config = ConfigurationHolder.getConfiguration();
    try {
        if (!DirectoryReader.indexExists(directory)) {
            return null;
        }
    } catch (IOException e) {
        log.fatal("Error when trying to check if directory exists!", e);
        return new ArrayList<>();
    }
    DirectoryReader ireader;
    try {
        ireader = DirectoryReader.open(directory);
    } catch (IOException e) {
        log.fatal("Error when trying to open directory!", e);
        return null;
    }

    IndexSearcher isearcher = new IndexSearcher(ireader);
    Query textQuery = null;
    QueryParser parser = new QueryParser(FieldNames.TEXT.getField(),
            AnalyzerMapping.getInstance().ANALYZER_FOR_DELIMITER);
    parser.setDefaultOperator(config.getDefaultOperator());
    BooleanQuery query = new BooleanQuery();
    try {
        textQuery = parser.parse(queryString);
    } catch (ParseException e) {
        log.fatal("Error while parsing query: " + queryString, e);
    }

    // if id does not equal null only the query with the given id will be
    // searched
    // this can be used to search the latest element only
    if (id != null) {
        Query idQuery = NumericRangeQuery.newIntRange(FieldNames.ID.getField(), id.intValue(), id.intValue(),
                true, true);
        query.add(idQuery, Occur.MUST);
    }
    query.add(textQuery, Occur.MUST);
    ScoreDoc[] hits = null;
    try {
        hits = isearcher.search(query, 1000).scoreDocs;
    } catch (IOException e) {
        log.fatal("Error while trying to search!", e);
    }
    List<Document> result = new ArrayList<>();
    for (int i = 0; i < hits.length; i++) {
        try {
            result.add(isearcher.doc(hits[i].doc));
            log.info("Found result for query \"" + queryString + "\".");
        } catch (IOException e) {
            log.fatal("Error when getting document!", e);
        }
    }
    return result;
}

From source file:edu.illinois.cs.cogcomp.bigdata.lucene.Lucene.java

License:Open Source License

public static boolean indexExists(String pageIndex) {
    try {
        return DirectoryReader.indexExists(new NIOFSDirectory(Paths.get(pageIndex)));
    } catch (IOException e) {
        e.printStackTrace();
    }
    return false;
}

From source file:edu.stanford.muse.index.Indexer.java

License:Apache License

/**
 * Sets up the indexer for read access only. If it is needed for writing only, call
 * setupForWrite; if both read and write access are needed, call both.
 */
synchronized void setupForRead() {
    log.info("setting up index for read only access");
    long startTime = System.currentTimeMillis();

    //closeHandles();
    try {
        setupDirectory();

        String[] defaultSearchFields, defaultSearchFieldsOriginal;
        String[] defaultSearchFieldSubject = new String[] { "title" }; // for subject only search
        String[] defaultSearchFieldCorrespondents;
        //body field should be there, as the content of the attachment lies in this field, should also include meta field?
        //why the search over en-names and en-names-original when body/body_original is included in the search fields?
        defaultSearchFields = new String[] { "body", "title", "to_names", "from_names", "cc_names", "bcc_names",
                "to_emails", "from_emails", "cc_emails", "bcc_emails" };
        defaultSearchFieldsOriginal = new String[] { "body_original", "title" }; // we want to leave title there because we want to always hit the title -- discussed with Peter June 27 2015
        defaultSearchFieldCorrespondents = new String[] { "to_names", "from_names", "cc_names", "bcc_names",
                "to_emails", "from_emails", "cc_emails", "bcc_emails" };
        // names field added above after email discussion with Sit 6/11/2013. problem is that we're not using the Lucene EnglishPossessiveFilter, so
        // NER will extract the name Stanford University in a sentence like:
        // "This is Stanford University's website."
        // but when the user clicks on the name "Stanford University" in say monthly cards, we
        // will not match the message with this sentence because of the apostrophe.

        //for searching an attachment with fileName
        String[] metaSearchFields = new String[] { "fileName" };
        // Parse a simple query that searches for "text":
        if (parser == null) {
            //parser = new QueryParser(MUSE_LUCENE_VERSION, defaultSearchField, analyzer);
            parser = new MultiFieldQueryParser(LUCENE_VERSION, defaultSearchFields, analyzer);
            parserOriginal = new MultiFieldQueryParser(LUCENE_VERSION, defaultSearchFieldsOriginal, analyzer);
            parserSubject = new MultiFieldQueryParser(LUCENE_VERSION, defaultSearchFieldSubject, analyzer);
            parserCorrespondents = new MultiFieldQueryParser(LUCENE_VERSION, defaultSearchFieldCorrespondents,
                    analyzer);
            parserMeta = new MultiFieldQueryParser(LUCENE_VERSION, metaSearchFields, new KeywordAnalyzer());
        }

        /**
         * Bunch of gotchas here
         * It's a bad idea to store Lucene internal docIds, as no assumptions about the internal docIds should be made,
         * not even that they are serial. When searching, Lucene may ignore logically deleted docs.
         * Lucene does not clean up deleted docs immediately, and having them in the index may bring down search performance by 50%.
         * Deleted docs are cleaned up only during merging of indices.*/
        int numContentDocs = 0, numContentDeletedDocs = 0, numAttachmentDocs = 0, numAttachmentDeletedDocs = 0;
        if (DirectoryReader.indexExists(directory)) {
            DirectoryReader ireader = DirectoryReader.open(directory);
            if (ireader.numDeletedDocs() > 0)
                log.warn("!!!!!!!\nIndex reader has " + ireader.numDocs() + " doc(s) of which "
                        + ireader.numDeletedDocs() + " are deleted)\n!!!!!!!!!!");
            isearcher = new IndexSearcher(ireader);
            contentDocIds = new LinkedHashMap<>();
            numContentDocs = ireader.numDocs();
            numContentDeletedDocs = ireader.numDeletedDocs();

            Bits liveDocs = MultiFields.getLiveDocs(ireader);
            Set<String> fieldsToLoad = new HashSet<>();
            fieldsToLoad.add("docId");
            for (int i = 0; i < ireader.maxDoc(); i++) {
                org.apache.lucene.document.Document doc = ireader.document(i, fieldsToLoad);
                if (liveDocs != null && !liveDocs.get(i))
                    continue;

                if (doc == null || doc.get("docId") == null)
                    continue;
                contentDocIds.put(i, doc.get("docId"));
            }
            log.info("Loaded: " + contentDocIds.size() + " content docs");
        }

        if (DirectoryReader.indexExists(directory_blob)) {
            IndexReader ireader_blob = DirectoryReader.open(directory_blob);
            isearcher_blob = new IndexSearcher(ireader_blob); // read-only=true
            blobDocIds = new LinkedHashMap<Integer, String>();

            numAttachmentDocs = ireader_blob.numDocs();
            numAttachmentDeletedDocs = ireader_blob.numDeletedDocs();

            Bits liveDocs = MultiFields.getLiveDocs(ireader_blob);
            Set<String> fieldsToLoad = new HashSet<String>();
            fieldsToLoad.add("docId");
            for (int i = 0; i < ireader_blob.maxDoc(); i++) {
                org.apache.lucene.document.Document doc = ireader_blob.document(i, fieldsToLoad);
                if (liveDocs != null && !liveDocs.get(i))
                    continue;

                if (doc == null || doc.get("docId") == null)
                    continue;
                blobDocIds.put(i, doc.get("docId"));
            }
            log.info("Loaded: " + blobDocIds.size() + " attachment docs");
        }

        log.warn("Number of content docs: " + numContentDocs + ", number deleted: " + numContentDeletedDocs);
        log.warn("Number of attachment docs: " + numAttachmentDocs + ", number deleted: "
                + numAttachmentDeletedDocs);

        if (dirNameToDocIdMap == null)
            dirNameToDocIdMap = new LinkedHashMap<String, Map<Integer, String>>();
    } catch (Exception e) {
        Util.print_exception(e, log);
    }
    log.info("Setting up index for read took " + (System.currentTimeMillis() - startTime) + " ms");
}

From source file:edu.usc.ir.geo.gazetteer.GeoNameResolver.java

License:Apache License

/**
 * Build the gazetteer index line by line
 *
 * @param gazetteerPath
 *            path of the gazetteer file
 * @param indexerPath
 *            path to the created Lucene index directory.
 * @throws IOException
 * @throws RuntimeException
 */
public void buildIndex(String gazetteerPath, String indexerPath) throws IOException {
    File indexfile = new File(indexerPath);
    indexDir = FSDirectory.open(indexfile.toPath());
    if (!DirectoryReader.indexExists(indexDir)) {
        IndexWriterConfig config = new IndexWriterConfig(analyzer);
        indexWriter = new IndexWriter(indexDir, config);
        Logger logger = Logger.getLogger(this.getClass().getName());
        logger.log(Level.WARNING, "Start Building Index for Gazatteer");
        BufferedReader filereader = new BufferedReader(
                new InputStreamReader(new FileInputStream(gazetteerPath), "UTF-8"));
        String line;
        int count = 0;
        while ((line = filereader.readLine()) != null) {
            try {
                count += 1;
                if (count % 100000 == 0) {
                    logger.log(Level.INFO, "Indexed Row Count: " + count);
                }
                addDoc(indexWriter, line);

            } catch (RuntimeException re) {
                logger.log(Level.WARNING, "Skipping... Error on line: {}", line);
            }
        }
        logger.log(Level.WARNING, "Building Finished");
        filereader.close();
        indexWriter.close();
    }
}

From source file:fr.paris.lutece.plugins.directory.service.directorysearch.DirectorySearchService.java

License:Open Source License

/**
 * Process indexing
 * @param bCreate true to start a full indexing,
 *            false to begin an incremental indexing
 * @return the log
 */
public String processIndexing(boolean bCreate) {
    StringBuffer sbLogs = new StringBuffer();
    IndexWriter writer = null;
    boolean bCreateIndex = bCreate;

    try {
        sbLogs.append("\r\nIndexing all contents ...\r\n");

        if (!DirectoryReader.indexExists(_luceneDirectory)) { //init index
            bCreateIndex = true;
        }

        if (!bCreateIndex && IndexWriter.isLocked(_luceneDirectory)) {
            IndexWriter.unlock(_luceneDirectory);
        }

        IndexWriterConfig conf = new IndexWriterConfig(Version.LUCENE_46, _analyzer);

        if (bCreateIndex) {
            conf.setOpenMode(OpenMode.CREATE);
        } else {
            conf.setOpenMode(OpenMode.APPEND);
        }

        writer = new IndexWriter(_luceneDirectory, conf);

        Date start = new Date();

        sbLogs.append("\r\n<strong>Indexer : ");
        sbLogs.append(_indexer.getName());
        sbLogs.append(" - ");
        sbLogs.append(_indexer.getDescription());
        sbLogs.append("</strong>\r\n");
        _indexer.processIndexing(writer, bCreateIndex, sbLogs);

        Date end = new Date();

        sbLogs.append("Duration of the treatment : ");
        sbLogs.append(end.getTime() - start.getTime());
        sbLogs.append(" milliseconds\r\n");
    } catch (Exception e) {
        sbLogs.append(" caught a ");
        sbLogs.append(e.getClass());
        sbLogs.append("\n with message: ");
        sbLogs.append(e.getMessage());
        sbLogs.append("\r\n");
        AppLogService.error("Indexing error : " + e.getMessage(), e);
    } finally {
        try {
            if (writer != null) {
                writer.close();
            }
        } catch (IOException e) {
            AppLogService.error(e.getMessage(), e);
        }
    }

    return sbLogs.toString();
}

From source file:fr.paris.lutece.plugins.document.service.docsearch.DocSearchService.java

License:Open Source License

/**
 * Indexing documents for searching
 * @param bCreate tells whether the indexing is total (true) or incremental (false)
 * @return indexing logs
 */
public String processIndexing(boolean bCreate) {
    StringBuilder sbLogs = new StringBuilder();

    IndexWriter writer = null;
    boolean bCreateIndex = bCreate;

    try {
        sbLogs.append("\r\nIndexing all contents ...\r\n");

        Directory dir = NIOFSDirectory.open(new File(_strIndex));

        if (!DirectoryReader.indexExists(dir)) { //init index
            bCreateIndex = true;
        }

        Date start = new Date();
        IndexWriterConfig conf = new IndexWriterConfig(Version.LUCENE_46, _analyzer);

        if (bCreateIndex) {
            conf.setOpenMode(OpenMode.CREATE);
        } else {
            conf.setOpenMode(OpenMode.APPEND);
        }

        writer = new IndexWriter(dir, conf);

        if (!bCreateIndex) {
            //incremental indexing

            //add all document which must be add
            for (IndexerAction action : getAllIndexerActionByTask(IndexerAction.TASK_CREATE)) {
                try {
                    ArrayList<Integer> luceneDocumentId = new ArrayList<Integer>();
                    luceneDocumentId.add(action.getIdDocument());

                    List<org.apache.lucene.document.Document> luceneDocument = _indexer
                            .getDocuments(luceneDocumentId);

                    if ((luceneDocument != null) && (luceneDocument.size() > 0)) {
                        Iterator<org.apache.lucene.document.Document> it = luceneDocument.iterator();

                        while (it.hasNext()) {
                            org.apache.lucene.document.Document doc = it.next();
                            writer.addDocument(doc);
                            sbLogs.append("Adding ");
                            sbLogs.append(doc.get(DocSearchItem.FIELD_TYPE));
                            sbLogs.append(" #");
                            sbLogs.append(doc.get(DocSearchItem.FIELD_UID));
                            sbLogs.append(" - ");
                            sbLogs.append(doc.get(DocSearchItem.FIELD_TITLE));
                            sbLogs.append("\r\n");
                        }
                    }
                } catch (IOException e) {
                    sbLogs.append("Error durign document indexation parsing.");
                    sbLogs.append("\r\n");
                }

                removeIndexerAction(action.getIdAction());
            }

            //Update all document which must be update
            for (IndexerAction action : getAllIndexerActionByTask(IndexerAction.TASK_MODIFY)) {
                try {
                    ArrayList<Integer> luceneDocumentId = new ArrayList<Integer>();
                    luceneDocumentId.add(action.getIdDocument());

                    List<org.apache.lucene.document.Document> luceneDocument = _indexer
                            .getDocuments(luceneDocumentId);

                    if ((luceneDocument != null) && (luceneDocument.size() > 0)) {
                        Iterator<org.apache.lucene.document.Document> it = luceneDocument.iterator();

                        while (it.hasNext()) {
                            org.apache.lucene.document.Document doc = it.next();
                            writer.updateDocument(
                                    new Term(DocSearchItem.FIELD_UID, Integer.toString(action.getIdDocument())),
                                    doc);
                            sbLogs.append("Updating ");
                            sbLogs.append(doc.get(DocSearchItem.FIELD_TYPE));
                            sbLogs.append(" #");
                            sbLogs.append(doc.get(DocSearchItem.FIELD_UID));
                            sbLogs.append(" - ");
                            sbLogs.append(doc.get(DocSearchItem.FIELD_TITLE));
                            sbLogs.append("\r\n");
                        }
                    }
                } catch (IOException e) {
                    sbLogs.append("Error durign document indexation parsing.");
                    sbLogs.append("\r\n");
                }

                removeIndexerAction(action.getIdAction());
            }

            //delete all document which must be delete
            for (IndexerAction action : getAllIndexerActionByTask(IndexerAction.TASK_DELETE)) {
                writer.deleteDocuments(
                        new Term(DocSearchItem.FIELD_UID, Integer.toString(action.getIdDocument())));
                sbLogs.append("Deleting ");
                sbLogs.append(" #");
                sbLogs.append(action.getIdDocument());
                sbLogs.append("\r\n");

                removeIndexerAction(action.getIdAction());
            }
        } else {
            //delete all incremental action
            removeAllIndexerAction();

            Collection<Integer> listIdDocuments = DocumentHome.findAllPrimaryKeys();
            ArrayList<Integer> luceneDocumentId;

            for (Integer nIdDocument : listIdDocuments) {
                try {
                    luceneDocumentId = new ArrayList<Integer>();
                    luceneDocumentId.add(nIdDocument);

                    List<Document> listDocuments = _indexer.getDocuments(luceneDocumentId);

                    for (Document doc : listDocuments) {
                        writer.addDocument(doc);
                        sbLogs.append("Indexing ");
                        sbLogs.append(doc.get(DocSearchItem.FIELD_TYPE));
                        sbLogs.append(" #");
                        sbLogs.append(doc.get(DocSearchItem.FIELD_UID));
                        sbLogs.append(" - ");
                        sbLogs.append(doc.get(DocSearchItem.FIELD_TITLE));
                        sbLogs.append("\r\n");
                    }
                } catch (IOException e) {
                    sbLogs.append("Error durign document indexation parsing.");
                    sbLogs.append("\r\n");
                }
            }
        }

        Date end = new Date();
        sbLogs.append("Duration of the treatment : ");
        sbLogs.append(end.getTime() - start.getTime());
        sbLogs.append(" milliseconds\r\n");
    } catch (Exception e) {
        sbLogs.append(" caught a ");
        sbLogs.append(e.getClass());
        sbLogs.append("\n with message: ");
        sbLogs.append(e.getMessage());
        sbLogs.append("\r\n");
        AppLogService.error("Indexing error : " + e.getMessage(), e);
    } finally {
        try {
            if (writer != null) {
                writer.close();
            }
        } catch (IOException e) {
            AppLogService.error(e.getMessage(), e);
        }
    }

    return sbLogs.toString();
}

From source file:fr.paris.lutece.plugins.search.solr.indexer.SolrIndexerService.java

License:Open Source License

/**
 * Process the indexing
 * @param bCreate tells whether the indexing is total (true) or incremental (false)
 * @return the result log of the indexing
 */
public static synchronized String processIndexing(boolean bCreate) {
    // String buffer for building the response page;
    _sbLogs = new StringBuffer();

    Plugin plugin = PluginService.getPlugin(SolrPlugin.PLUGIN_NAME);

    boolean bCreateIndex = bCreate;
    String strWebappName = getWebAppName();

    try {
        Directory dir = IndexationService.getDirectoryIndex();

        if (!DirectoryReader.indexExists(dir)) { //init index
            bCreateIndex = true;
        }

        Date start = new Date();

        if (bCreateIndex) {
            _sbLogs.append("\r\nIndexing all contents ...\r\n");

            // Remove all indexed values of this site
            SOLR_SERVER.deleteByQuery(SearchItem.FIELD_UID + ":" + strWebappName
                    + SolrConstants.CONSTANT_UNDERSCORE + SolrConstants.CONSTANT_WILDCARD);

            for (SolrIndexer solrIndexer : INDEXERS) {
                if (solrIndexer.isEnable()) {
                    _sbLogs.append("\r\n<strong>Indexer : ");
                    _sbLogs.append(solrIndexer.getName());
                    _sbLogs.append(" - ");
                    _sbLogs.append(solrIndexer.getDescription());
                    _sbLogs.append("</strong>\r\n");

                    //the indexer will call write(doc)
                    List<String> lstErrors = solrIndexer.indexDocuments();

                    if (lstErrors != null) {
                        for (String strError : lstErrors) {
                            _sbLogs.append("<strong>ERROR : ");
                            _sbLogs.append(strError);
                            _sbLogs.append("</strong>\r\n");
                        }
                    }
                }
            }

            // Remove all actions of the database
            SolrIndexerActionHome.removeAll(plugin);
        } else {
            _sbLogs.append("\r\nIncremental Indexing ...\r\n");

            //incremental indexing
            Collection<SolrIndexerAction> actions = SolrIndexerActionHome.getList(plugin);

            for (SolrIndexerAction action : actions) {
                // catch any exception coming from an indexer to prevent the global indexation from failing
                try {
                    SolrIndexer indexer = findSolrIndexer(action.getTypeResource());

                    if (indexer == null) {
                        _sbLogs.append(" - ERROR : ");
                        _sbLogs.append(" No indexer found for the resource name : ")
                                .append(action.getTypeResource());
                        _sbLogs.append("</strong>\r\n");

                        continue;
                    }

                    if (action.getIdTask() == IndexerAction.TASK_DELETE) {
                        if (action.getIdPortlet() != IndexationService.ALL_DOCUMENT) {
                            //delete only the index linked to this portlet
                            SOLR_SERVER.deleteByQuery(SearchItem.FIELD_DOCUMENT_PORTLET_ID + ":"
                                    + action.getIdDocument() + "&" + Integer.toString(action.getIdPortlet()));
                        } else {
                            //delete all index linked to uid. We get the uid of the resource to prefix it like we do during the indexation 
                            SOLR_SERVER.deleteByQuery(SearchItem.FIELD_UID + ":" + strWebappName
                                    + SolrConstants.CONSTANT_UNDERSCORE
                                    + indexer.getResourceUid(action.getIdDocument(), action.getTypeResource()));
                        }

                        _sbLogs.append("Deleting ");
                        _sbLogs.append(" #");
                        _sbLogs.append(action.getIdDocument());
                        _sbLogs.append("\r\n");
                    } else {
                        List<SolrItem> lstItems = indexer.getDocuments(action.getIdDocument());

                        if ((lstItems != null) && !lstItems.isEmpty()) {
                            for (SolrItem item : lstItems) {
                                if ((action.getIdPortlet() == IndexationService.ALL_DOCUMENT)
                                        || ((item.getDocPortletId() != null) && item.getDocPortletId()
                                                .equals(item.getUid() + "&" + action.getIdPortlet()))) {
                                    if (action.getIdTask() == IndexerAction.TASK_CREATE) {
                                        _sbLogs.append("Adding ");
                                    } else if (action.getIdTask() == IndexerAction.TASK_MODIFY) {
                                        _sbLogs.append("Updating ");
                                    }

                                    SOLR_SERVER.add(solrItem2SolrInputDocument(item));
                                    SOLR_SERVER.commit();

                                    _sbLogs.append(item.getType());
                                    _sbLogs.append(" #");
                                    _sbLogs.append(item.getUid());
                                    _sbLogs.append(" - ");
                                    _sbLogs.append(item.getTitle());
                                    _sbLogs.append("\r\n");
                                }
                            }
                        }
                    }

                    SolrIndexerActionHome.remove(action.getIdAction(), plugin);
                } catch (Exception e) {
                    _sbLogs.append("\r\n<strong>Action from indexer : ");
                    _sbLogs.append(action.getIndexerName());
                    _sbLogs.append(" Action ID : ").append(action.getIdAction()).append(" - Document ID : ")
                            .append(action.getIdDocument());
                    _sbLogs.append(" - ERROR : ");
                    _sbLogs.append(e.getMessage())
                            .append((e.getCause() != null) ? (" : " + e.getCause().getMessage())
                                    : SolrConstants.CONSTANT_EMPTY_STRING);
                    _sbLogs.append("</strong>\r\n");
                }
            }

            //reindexing all pages.
            SOLR_SERVER.deleteByQuery(SearchItem.FIELD_TYPE + ":" + PARAM_TYPE_PAGE);

            for (SolrIndexer indexer : INDEXERS) {
                if (indexer.isEnable() && SolrPageIndexer.NAME.equals(indexer.getName())) {
                    indexer.indexDocuments();

                    break;
                }
            }
        }

        SOLR_SERVER.commit();
        SOLR_SERVER.optimize();

        Date end = new Date();
        _sbLogs.append("Duration of the treatment : ");
        _sbLogs.append(end.getTime() - start.getTime());
        _sbLogs.append(" milliseconds\r\n");
    } catch (Exception e) {
        _sbLogs.append(" caught a ");
        _sbLogs.append(e.getClass());
        _sbLogs.append("\n with message: ");
        _sbLogs.append(e.getMessage());
        _sbLogs.append("\r\n");
        AppLogService.error("Indexing error : " + e.getMessage(), e);
    }

    return _sbLogs.toString();
}

From source file:io.github.msurdi.redeye.core.lucene.AbstractIndex.java

License:Apache License

/**
 * This method provides a very simplistic view of the Lucene index status.
 *
 * @return true if the index exists, false otherwise.
 * @throws IOException
 */
@Override
public boolean isOk() throws IOException {
    return DirectoryReader.indexExists(index);
}

From source file:it.giacomobergami.lucenepdfindexer.lucene.ClosedLuceneIndex.java

License:Open Source License

public boolean exists() {
    try {
        return DirectoryReader.indexExists(index);
    } catch (IOException e) {
        e.printStackTrace();
        return false;
    }
}
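
Several of the examples above (edu.illinois.cs.cogcomp, io.github.msurdi.redeye, it.giacomobergami) wrap the checked IOException in a helper that simply returns false on failure. A generic sketch of that pattern, with an illustrative class name not taken from any of the projects above:

import java.io.IOException;
import java.nio.file.Paths;

import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;

public final class IndexUtil {
    private IndexUtil() {
    }

    // Returns false both when no index is present and when the check itself fails.
    public static boolean indexExists(String indexPath) {
        try (Directory dir = FSDirectory.open(Paths.get(indexPath))) {
            return DirectoryReader.indexExists(dir);
        } catch (IOException e) {
            return false;
        }
    }
}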