Example usage for org.apache.lucene.index IndexWriterConfig setOpenMode

List of usage examples for org.apache.lucene.index IndexWriterConfig setOpenMode

Introduction

On this page you can find an example usage for org.apache.lucene.index IndexWriterConfig setOpenMode.

Prototype

public IndexWriterConfig setOpenMode(OpenMode openMode) 

Source Link

Document

Specifies OpenMode of the index.

Usage

From source file:fr.inra.maiage.bibliome.alvisnlp.bibliomefactory.modules.alvisdb.AlvisDBIndexer.java

License:Apache License

@Override
public void process(ProcessingContext<Corpus> ctx, Corpus corpus) throws ModuleException {
    // Index every resolved ADB element of the corpus into a Lucene index on disk.
    try (KeywordAnalyzer keywordAnalyzer = new KeywordAnalyzer()) {
        IndexWriterConfig writerConfig = new IndexWriterConfig(Version.LUCENE_36, keywordAnalyzer);
        // "append" keeps an existing index, otherwise the index is rebuilt from scratch.
        OpenMode openMode = append ? OpenMode.CREATE_OR_APPEND : OpenMode.CREATE;
        writerConfig.setOpenMode(openMode);
        // Single try-with-resources: the writer is closed before the directory,
        // exactly as in the nested form.
        try (Directory directory = FSDirectory.open(indexDir);
                IndexWriter writer = new IndexWriter(directory, writerConfig)) {
            AlvisDBIndexerResolvedObjects resolvedObjects = getResolvedObjects();
            Logger logger = getLogger(ctx);
            EvaluationContext evaluationContext = new EvaluationContext(logger);
            for (ADBElements.Resolved element : resolvedObjects.elements) {
                element.indexElements(logger, writer, evaluationContext, corpus);
            }
        } catch (IOException e) {
            throw new ProcessingException(e);
        }
    }
}

From source file:fr.inra.maiage.bibliome.util.pubmed.PubMedIndexUpdater.java

/** Builds the writer configuration used for the PubMed index: shared global
 *  analyzer, CREATE_OR_APPEND so an existing index is reused when present. */
private static IndexWriterConfig getIndexWriterConfig() {
    IndexWriterConfig config = new IndexWriterConfig(PubMedIndexUtils.LUCENE_VERSION,
            PubMedIndexUtils.getGlobalAnalyzer());
    config.setOpenMode(IndexWriterConfig.OpenMode.CREATE_OR_APPEND);
    return config;
}

From source file:fr.lipn.yasemir.indexing.YasemirIndexBuilder.java

License:Open Source License

/**
 * Method that starts the actual indexing.
 * <p>
 * Opens the index directory at {@code indexPath}, configures the writer with
 * the similarity selected by {@code Yasemir.SCORE} (BM25 or Lucene default),
 * indexes {@code docDir}, then prints the elapsed time on stderr.
 * I/O failures are reported on stderr rather than propagated.
 */
public void run() {
    Date start = new Date();
    try {
        System.err.println("[YaSemIR] Indexing to directory '" + indexPath + "'...");

        // IndexWriter configuration
        IndexWriterConfig iwc = new IndexWriterConfig(Version.LUCENE_44, Yasemir.analyzer);
        if (Yasemir.SCORE.equals("BM25"))
            iwc.setSimilarity(new BM25Similarity());
        else
            iwc.setSimilarity(new DefaultSimilarity());

        if (create) {
            // Create a new index in the directory, removing any
            // previously indexed documents:
            iwc.setOpenMode(OpenMode.CREATE);
        } else {
            // Add new documents to an existing index:
            iwc.setOpenMode(OpenMode.CREATE_OR_APPEND);
        }

        // try-with-resources guarantees the writer and directory are closed
        // even when indexDocs() throws; the original leaked both on failure.
        try (Directory dir = FSDirectory.open(new File(indexPath));
                IndexWriter writer = new IndexWriter(dir, iwc)) {
            indexDocs(writer, docDir);
        }

        Date end = new Date();
        System.err.println(end.getTime() - start.getTime() + " total milliseconds");
    } catch (IOException e) {
        System.err.println(" caught a " + e.getClass() + "\n with message: " + e.getMessage());
    }
}

From source file:fr.paris.lutece.plugins.directory.service.directorysearch.DirectorySearchService.java

License:Open Source License

/**
 * Process indexing/*from   w w  w.j a v  a2 s. c  om*/
 * @param bCreate true for start full indexing
 *            false for begin incremental indexing
 * @return the log
 */
public String processIndexing(boolean bCreate) {
    StringBuffer sbLogs = new StringBuffer();
    IndexWriter writer = null;
    boolean bCreateIndex = bCreate;

    try {
        sbLogs.append("\r\nIndexing all contents ...\r\n");

        if (!DirectoryReader.indexExists(_luceneDirectory)) { //init index
            bCreateIndex = true;
        }

        if (!bCreateIndex && IndexWriter.isLocked(_luceneDirectory)) {
            IndexWriter.unlock(_luceneDirectory);
        }

        IndexWriterConfig conf = new IndexWriterConfig(Version.LUCENE_46, _analyzer);

        if (bCreateIndex) {
            conf.setOpenMode(OpenMode.CREATE);
        } else {
            conf.setOpenMode(OpenMode.APPEND);
        }

        writer = new IndexWriter(_luceneDirectory, conf);

        Date start = new Date();

        sbLogs.append("\r\n<strong>Indexer : ");
        sbLogs.append(_indexer.getName());
        sbLogs.append(" - ");
        sbLogs.append(_indexer.getDescription());
        sbLogs.append("</strong>\r\n");
        _indexer.processIndexing(writer, bCreateIndex, sbLogs);

        Date end = new Date();

        sbLogs.append("Duration of the treatment : ");
        sbLogs.append(end.getTime() - start.getTime());
        sbLogs.append(" milliseconds\r\n");
    } catch (Exception e) {
        sbLogs.append(" caught a ");
        sbLogs.append(e.getClass());
        sbLogs.append("\n with message: ");
        sbLogs.append(e.getMessage());
        sbLogs.append("\r\n");
        AppLogService.error("Indexing error : " + e.getMessage(), e);
    } finally {
        try {
            if (writer != null) {
                writer.close();
            }
        } catch (IOException e) {
            AppLogService.error(e.getMessage(), e);
        }
    }

    return sbLogs.toString();
}

From source file:fr.paris.lutece.plugins.document.service.docsearch.DocSearchService.java

License:Open Source License

/**
 * Indexing documents for searching/*from   w  ww  . j  a v  a 2  s  . c  o  m*/
 * @param bCreate tell if it's total indexing or total (total = true)
 * @return indexing logs
 */
public String processIndexing(boolean bCreate) {
    StringBuilder sbLogs = new StringBuilder();

    IndexWriter writer = null;
    boolean bCreateIndex = bCreate;

    try {
        sbLogs.append("\r\nIndexing all contents ...\r\n");

        Directory dir = NIOFSDirectory.open(new File(_strIndex));

        if (!DirectoryReader.indexExists(dir)) { //init index
            bCreateIndex = true;
        }

        Date start = new Date();
        IndexWriterConfig conf = new IndexWriterConfig(Version.LUCENE_46, _analyzer);

        if (bCreateIndex) {
            conf.setOpenMode(OpenMode.CREATE);
        } else {
            conf.setOpenMode(OpenMode.APPEND);
        }

        writer = new IndexWriter(dir, conf);

        if (!bCreateIndex) {
            //incremental indexing

            //add all document which must be add
            for (IndexerAction action : getAllIndexerActionByTask(IndexerAction.TASK_CREATE)) {
                try {
                    ArrayList<Integer> luceneDocumentId = new ArrayList<Integer>();
                    luceneDocumentId.add(action.getIdDocument());

                    List<org.apache.lucene.document.Document> luceneDocument = _indexer
                            .getDocuments(luceneDocumentId);

                    if ((luceneDocument != null) && (luceneDocument.size() > 0)) {
                        Iterator<org.apache.lucene.document.Document> it = luceneDocument.iterator();

                        while (it.hasNext()) {
                            org.apache.lucene.document.Document doc = it.next();
                            writer.addDocument(doc);
                            sbLogs.append("Adding ");
                            sbLogs.append(doc.get(DocSearchItem.FIELD_TYPE));
                            sbLogs.append(" #");
                            sbLogs.append(doc.get(DocSearchItem.FIELD_UID));
                            sbLogs.append(" - ");
                            sbLogs.append(doc.get(DocSearchItem.FIELD_TITLE));
                            sbLogs.append("\r\n");
                        }
                    }
                } catch (IOException e) {
                    sbLogs.append("Error durign document indexation parsing.");
                    sbLogs.append("\r\n");
                }

                removeIndexerAction(action.getIdAction());
            }

            //Update all document which must be update
            for (IndexerAction action : getAllIndexerActionByTask(IndexerAction.TASK_MODIFY)) {
                try {
                    ArrayList<Integer> luceneDocumentId = new ArrayList<Integer>();
                    luceneDocumentId.add(action.getIdDocument());

                    List<org.apache.lucene.document.Document> luceneDocument = _indexer
                            .getDocuments(luceneDocumentId);

                    if ((luceneDocument != null) && (luceneDocument.size() > 0)) {
                        Iterator<org.apache.lucene.document.Document> it = luceneDocument.iterator();

                        while (it.hasNext()) {
                            org.apache.lucene.document.Document doc = it.next();
                            writer.updateDocument(
                                    new Term(DocSearchItem.FIELD_UID, Integer.toString(action.getIdDocument())),
                                    doc);
                            sbLogs.append("Updating ");
                            sbLogs.append(doc.get(DocSearchItem.FIELD_TYPE));
                            sbLogs.append(" #");
                            sbLogs.append(doc.get(DocSearchItem.FIELD_UID));
                            sbLogs.append(" - ");
                            sbLogs.append(doc.get(DocSearchItem.FIELD_TITLE));
                            sbLogs.append("\r\n");
                        }
                    }
                } catch (IOException e) {
                    sbLogs.append("Error durign document indexation parsing.");
                    sbLogs.append("\r\n");
                }

                removeIndexerAction(action.getIdAction());
            }

            //delete all document which must be delete
            for (IndexerAction action : getAllIndexerActionByTask(IndexerAction.TASK_DELETE)) {
                writer.deleteDocuments(
                        new Term(DocSearchItem.FIELD_UID, Integer.toString(action.getIdDocument())));
                sbLogs.append("Deleting ");
                sbLogs.append(" #");
                sbLogs.append(action.getIdDocument());
                sbLogs.append("\r\n");

                removeIndexerAction(action.getIdAction());
            }
        } else {
            //delete all incremental action
            removeAllIndexerAction();

            Collection<Integer> listIdDocuments = DocumentHome.findAllPrimaryKeys();
            ArrayList<Integer> luceneDocumentId;

            for (Integer nIdDocument : listIdDocuments) {
                try {
                    luceneDocumentId = new ArrayList<Integer>();
                    luceneDocumentId.add(nIdDocument);

                    List<Document> listDocuments = _indexer.getDocuments(luceneDocumentId);

                    for (Document doc : listDocuments) {
                        writer.addDocument(doc);
                        sbLogs.append("Indexing ");
                        sbLogs.append(doc.get(DocSearchItem.FIELD_TYPE));
                        sbLogs.append(" #");
                        sbLogs.append(doc.get(DocSearchItem.FIELD_UID));
                        sbLogs.append(" - ");
                        sbLogs.append(doc.get(DocSearchItem.FIELD_TITLE));
                        sbLogs.append("\r\n");
                    }
                } catch (IOException e) {
                    sbLogs.append("Error durign document indexation parsing.");
                    sbLogs.append("\r\n");
                }
            }
        }

        Date end = new Date();
        sbLogs.append("Duration of the treatment : ");
        sbLogs.append(end.getTime() - start.getTime());
        sbLogs.append(" milliseconds\r\n");
    } catch (Exception e) {
        sbLogs.append(" caught a ");
        sbLogs.append(e.getClass());
        sbLogs.append("\n with message: ");
        sbLogs.append(e.getMessage());
        sbLogs.append("\r\n");
        AppLogService.error("Indexing error : " + e.getMessage(), e);
    } finally {
        try {
            if (writer != null) {
                writer.close();
            }
        } catch (IOException e) {
            AppLogService.error(e.getMessage(), e);
        }
    }

    return sbLogs.toString();
}

From source file:fr.paris.lutece.portal.service.search.LuceneSearchEngineTest.java

License:Open Source License

/** Opens a writer over the service index, always starting from an empty index
 *  so each test run is isolated. */
private IndexWriter getIndexWriter() throws Exception {
    IndexWriterConfig config = new IndexWriterConfig(IndexationService.getAnalyser());
    config.setOpenMode(OpenMode.CREATE);
    return new IndexWriter(IndexationService.getDirectoryIndex(), config);
}

From source file:gal.udc.fic.muei.tfm.dap.flipper.service.util.cbir.LireBuilder.java

License:Open Source License

/**
 * Indexes a picture: decodes the image bytes, then writes one Lucene document
 * per supported LIRE feature (correlogram, CEDD, color layout, edge histogram,
 * color histogram, PHOG).
 * @param source raw image bytes
 * @param picture_id identifier of the picture being indexed
 * @param conf writer configuration; when null a default CREATE_OR_APPEND
 *             whitespace-analyzer configuration is built
 * @throws IOException if the bytes cannot be decoded as an image or indexing fails
 */
public static void index(byte[] source, UUID picture_id, IndexWriterConfig conf) throws IOException {
    BufferedImage image;
    try (ByteArrayInputStream in = new ByteArrayInputStream(source)) {
        image = ImageIO.read(in);
    }
    // ImageIO.read returns null for unreadable data; fail fast with a clear
    // message instead of letting the feature builders NPE later.
    if (image == null) {
        throw new IOException("Unable to decode image bytes for picture " + picture_id);
    }

    // Creating an Lucene IndexWriter config when none was supplied.
    // NOTE: the original logged (conf == null), i.e. the inverted answer.
    log.debug("Is Lucene configured? " + (conf != null));
    if (conf == null) {
        conf = new IndexWriterConfig(LuceneUtils.LUCENE_VERSION,
                new WhitespaceAnalyzer(LuceneUtils.LUCENE_VERSION));
        conf.setOpenMode(IndexWriterConfig.OpenMode.CREATE_OR_APPEND);
    }

    luceneIndexer(image, picture_id, FeatureEnumerate.AutoColorCorrelogram.getText(),
            DocumentBuilderFactory.getAutoColorCorrelogramDocumentBuilder(), conf);
    luceneIndexer(image, picture_id, FeatureEnumerate.CEDD.getText(),
            DocumentBuilderFactory.getCEDDDocumentBuilder(), conf);
    luceneIndexer(image, picture_id, FeatureEnumerate.ColorLayout.getText(),
            DocumentBuilderFactory.getColorLayoutBuilder(), conf);
    luceneIndexer(image, picture_id, FeatureEnumerate.EdgeHistogram.getText(),
            DocumentBuilderFactory.getEdgeHistogramBuilder(), conf);
    luceneIndexer(image, picture_id, FeatureEnumerate.ColorHistogram.getText(),
            DocumentBuilderFactory.getColorHistogramDocumentBuilder(), conf);
    luceneIndexer(image, picture_id, FeatureEnumerate.PHOG.getText(),
            DocumentBuilderFactory.getPHOGDocumentBuilder(), conf);
}

From source file:gal.udc.fic.muei.tfm.dap.flipper.service.util.cbir.LireBuilder.java

License:Open Source License

/**
 * Removes the documents matching {@code term} from the feature index stored
 * under {@code prefix}.
 * @param pictureId id of the picture whose feature entry is removed
 * @param term term identifying the index documents to delete
 * @param prefix feature-specific index directory prefix
 * @param conf writer configuration; when null a default CREATE_OR_APPEND
 *             whitespace-analyzer configuration is built
 * @throws IOException if the index cannot be opened or updated
 */
private static void deleteFromFeature(UUID pictureId, Term term, String prefix, IndexWriterConfig conf)
        throws IOException {

    File file = getPath(prefix);

    // Creating an Lucene IndexWriter config when none was supplied.
    log.debug("Is Lucene configured: " + (conf == null));
    if (conf == null) {
        conf = new IndexWriterConfig(LuceneUtils.LUCENE_VERSION,
                new WhitespaceAnalyzer(LuceneUtils.LUCENE_VERSION));
        conf.setOpenMode(IndexWriterConfig.OpenMode.CREATE_OR_APPEND);
    }

    // try-with-resources: the original leaked the writer (and its write lock)
    // and the directory if deleteDocuments threw before close().
    try (Directory dir = FSDirectory.open(file);
            IndexWriter iw = new IndexWriter(dir, conf)) {
        iw.deleteDocuments(term);
    }
}

From source file:gov.nist.basekb.FreebaseIndexer.java

License:LGPL

/** Prepares a brand-new index directory and writer for indexing a triples file.
 *  Refuses to run when an index already exists so it is never clobbered. */
public void initializeIndexBuilder() throws Exception {
    String indexDirName = getIndexDirectoryName();
    if (new File(indexDirName).isDirectory()) {
        throw new IOException("Index directory already exists, remove it before indexing");
    }

    indexDirectory = FSDirectory.open(Paths.get(indexDirName));

    IndexWriterConfig config = new IndexWriterConfig(getIndexAnalyzer());
    // Always build from scratch; BEST_SPEED is the codec default
    // (BEST_COMPRESSION would be slower but produce a smaller index).
    config.setOpenMode(OpenMode.CREATE);
    config.setCodec(new Lucene54Codec(Mode.BEST_SPEED));

    indexWriter = new IndexWriter(indexDirectory, config);
    indexAnalyzer = getIndexAnalyzer();

    if (INDEX_PREDICATES) {
        printlnProg("Indexing individual predicates");
    }
    if (INDEX_TEXT) {
        printlnProg("Indexing combined predicate text values");
    }
    if (INDEX_LANGUAGE) {
        printlnProg("Indexing predicates for language(s): " + supportedLanguages);
    }
}

From source file:gov.nist.basekb.FreebaseTools.java

License:LGPL

/** Prepares a brand-new index directory and writer for indexing a triples file.
 *  Refuses to run when an index already exists so it is never clobbered. */
public void initializeIndexBuilder() throws Exception {
    String indexDirName = getIndexDirectoryName();
    if (new java.io.File(indexDirName).isDirectory()) {
        throw new IOException("Index directory already exists, remove it before indexing");
    }

    indexDirectory = FSDirectory.open(Paths.get(indexDirName));

    IndexWriterConfig config = new IndexWriterConfig(getIndexAnalyzer());
    // Always build from scratch; BEST_SPEED is the codec default
    // (BEST_COMPRESSION would be slower but produce a smaller index).
    config.setOpenMode(OpenMode.CREATE);
    config.setCodec(new Lucene54Codec(Mode.BEST_SPEED));

    indexWriter = new IndexWriter(indexDirectory, config);
    indexAnalyzer = getIndexAnalyzer();

    if (INDEX_PREDICATES) {
        printlnProg("Indexing individual predicates");
    }
    if (INDEX_TEXT) {
        printlnProg("Indexing combined predicate text values");
    }
    if (INDEX_LANGUAGE) {
        printlnProg("Indexing predicates for language(s): " + supportedLanguages);
    }
}