List of usage examples for org.apache.lucene.index.IndexWriter.close()
@Override public void close() throws IOException
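IndexWriter implements Closeable, so on Java 7+ its close() pairs naturally with try-with-resources. A minimal sketch of that idiom (the index path, the "body" field, and the choice of StandardAnalyzer are illustrative, not taken from the examples below):

import java.nio.file.Paths;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field.Store;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;

public class CloseSketch {
    public static void main(String[] args) throws Exception {
        Directory dir = FSDirectory.open(Paths.get("/tmp/example-index")); // illustrative path
        IndexWriterConfig config = new IndexWriterConfig(new StandardAnalyzer());
        // close() runs automatically at the end of the block; with the default
        // commitOnClose=true it writes pending changes and releases the write lock
        try (IndexWriter writer = new IndexWriter(dir, config)) {
            Document doc = new Document();
            doc.add(new TextField("body", "hello lucene", Store.YES));
            writer.addDocument(doc);
        }
    }
}

Most of the examples below predate this idiom or keep the writer open across several steps, so they call close() explicitly instead.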
From source file:de.cosmocode.lucene.LuceneQueryTest.java
License:Apache License
/**
 * Creates a new Lucene index with some predefined fields and arguments.
 * @throws IOException if creating the index failed
 */
@BeforeClass
public static void createLuceneIndex() throws IOException {
    final IndexWriter writer = IndexHelper.createIndexWriter();
    // iterate through all fields
    for (final String field : IndexHelper.ALL_FIELDS) {
        // name for the default field: "arg"; every other field: (fieldName)_arg
        final String fieldName = (field.equals(IndexHelper.DEFAULT_FIELD)) ? "arg" : field + "_arg";
        // iterate through all ARGS in IndexHelper and add documents with the current field
        int argsCounter = 0;
        for (final Object arg : IndexHelper.ARGS) {
            argsCounter++;
            // create a document with only two fields: name and field => arg
            writer.addDocument(
                    IndexHelper.createDocument("name", fieldName + argsCounter, field, arg.toString()));
            // add all args up to the current argument combined, for conjunct searches
            final Multimap<String, Object> multiArgs = ArrayListMultimap.create();
            multiArgs.put("name", fieldName + "1-" + argsCounter);
            for (final Object innerArg : Arrays.copyOf(IndexHelper.ARGS, argsCounter)) {
                multiArgs.put(field, innerArg);
            }
            writer.addDocument(IndexHelper.createDocument(multiArgs));
        }
    }
    writer.close();
}
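The final close() is what makes this fixture work: with a default writer configuration, close() commits the buffered documents, so searchers opened later in the tests actually see them.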
From source file:de.cosmocode.lucene.LuceneQueryTest.java
License:Apache License
/**
 * Cleans the index up again, to ensure an empty Lucene index in the next test.
 * @throws IOException if cleaning the index failed
 * @throws ParseException should not happen
 */
@AfterClass
public static void cleanLuceneIndex() throws IOException, ParseException {
    final IndexWriter writer = IndexHelper.createIndexWriter();
    writer.deleteDocuments(IndexHelper.createQueryParser().parse("+empty:empty"));
    writer.close();
}
From source file:de.dfki.km.leech.lucene.ToLuceneContentHandler.java
License:Open Source License
/**
 * Merges all temporary indices together into the initial indexWriter index. This is only
 * necessary if SplitAndMerge is enabled. Otherwise you don't have to invoke this method.
 */
@Override
public void crawlFinished() {
    try {
        for (int i = 0; i < m_llConsumerThreads.size(); i++)
            m_addDocsQueue.put(new InterruptThreadList());
        m_cyclicBarrier4DocConsumerThreads.await();
        m_llConsumerThreads.clear();

        if (getSplitAndMergeIndex() <= 0)
            return;

        // now we merge all temporary indices into the original one
        // the temporary writers still have to be closed - we do that now; the last one is not yet in the list
        if (m_luceneWriter != m_initialLuceneWriter) {
            for (IndexWriter writer2close : m_llIndexWriter2Close)
                writer2close.close();
            m_luceneWriter.close();
        }

        LinkedList<Directory> llIndicesDirs2Merge = new LinkedList<Directory>();
        for (String strTmpPath : m_hsTmpLuceneWriterPaths2Merge)
            llIndicesDirs2Merge.add(new SimpleFSDirectory(Paths.get(strTmpPath)));

        if (llIndicesDirs2Merge.size() == 0)
            return;

        Logger.getLogger(ToLuceneContentHandler.class.getName())
                .info("Will merge " + llIndicesDirs2Merge.size() + " temporary indices to the final one.");

        m_initialLuceneWriter.addIndexes(llIndicesDirs2Merge.toArray(new Directory[0]));
        m_initialLuceneWriter.commit();

        for (String strTmpPath : m_hsTmpLuceneWriterPaths2Merge)
            FileUtils.deleteDirectory(new File(strTmpPath));
    } catch (Exception e) {
        Logger.getLogger(ToLuceneContentHandler.class.getName()).log(Level.SEVERE, "Error", e);
    }
}
From source file:de.dfki.km.perspecting.obie.corpus.TextCorpus.java
License:Open Source License
/**
 * Returns a Lucene index on this {@link TextCorpus}.
 *
 * @param dir
 *            The directory the index is stored in.
 * @param reindex
 *            If <code>true</code>, an existing index will be re-created.
 * @return Access to the Lucene index.
 *
 * @throws Exception
 */
public IndexSearcher getLuceneIndex(File dir, boolean reindex) throws Exception {
    if (dir.exists()) {
        if (reindex) {
            FileUtils.deleteDirectory(dir);
            log.info("deleted directory: " + dir);
        } else {
            return new IndexSearcher(dir.getAbsolutePath());
        }
    }
    dir.mkdirs();
    log.info("created directory: " + dir);

    final WhitespaceAnalyzer analyser = new WhitespaceAnalyzer();
    final IndexWriter indexWriter = new IndexWriter(dir, analyser, true, MaxFieldLength.LIMITED);

    forEach(new DocumentProcedure<String>() {
        @Override
        public String process(Reader doc, URI uri) throws Exception {
            org.apache.lucene.document.Document document = new org.apache.lucene.document.Document();
            document.add(new Field("text", doc, TermVector.YES));
            indexWriter.addDocument(document, analyser);
            log.fine("indexes: " + document);
            return uri.toString();
        }
    });
    log.info("indexed: " + indexWriter.numDocs() + " documents");
    indexWriter.commit();
    indexWriter.close();
    return new IndexSearcher(dir.getAbsolutePath());
}
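This snippet targets a pre-3.x Lucene API (IndexSearcher(String), MaxFieldLength). For comparison, a rough sketch of the same index-then-search flow on a recent Lucene (8.x/9.x); the buildAndOpen method, the indexPath parameter, and the "text" field are illustrative:

import java.nio.file.Paths;
import org.apache.lucene.analysis.core.WhitespaceAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field.Store;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;

public class ModernIndexSketch {
    public static IndexSearcher buildAndOpen(String indexPath, Iterable<String> texts) throws Exception {
        Directory dir = FSDirectory.open(Paths.get(indexPath));
        IndexWriterConfig config = new IndexWriterConfig(new WhitespaceAnalyzer())
                .setOpenMode(IndexWriterConfig.OpenMode.CREATE);
        // try-with-resources replaces the explicit commit()/close() pair above
        try (IndexWriter writer = new IndexWriter(dir, config)) {
            for (String text : texts) {
                Document doc = new Document();
                doc.add(new TextField("text", text, Store.NO));
                writer.addDocument(doc);
            }
        }
        // readers are now opened from the Directory rather than from a path string
        return new IndexSearcher(DirectoryReader.open(dir));
    }
}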
From source file:de.elbe5.cms.search.SearchBean.java
License:Open Source License
public void deleteItem(int id, String dataType) {
    IndexWriter writer = null;
    try {
        switch (dataType) {
            case SiteSearchData.TYPE:
            case PageSearchData.TYPE:
            case FileSearchData.TYPE: {
                writer = openContentIndexWriter(false);
            }
            break;
            case UserSearchData.TYPE: {
                writer = openUserIndexWriter(false);
            }
            break;
        }
        assert (writer != null);
        writer.deleteDocuments(SearchData.getTerm(id));
        writer.close();
    } catch (Exception e) {
        Log.error("error deleting item", e);
    }
}
From source file:de.hsmannheim.ss15.alr.searchengine.DefaultLuceneController.java
public void refreshIndex() {
    boolean create = true;
    final Path docDir = Paths.get(docsDir);
    IndexWriter writer = null;
    try {
        Directory dir = FSDirectory.open(Paths.get(indexDir));
        Analyzer analyzer = new StandardAnalyzer();
        IndexWriterConfig iwc = new IndexWriterConfig(analyzer);

        // Add new documents to an existing index:
        iwc.setOpenMode(IndexWriterConfig.OpenMode.CREATE_OR_APPEND);

        // Optional: for better indexing performance, if you
        // are indexing many documents, increase the RAM
        // buffer. But if you do this, increase the max heap
        // size of the JVM (e.g. add -Xmx512m or -Xmx1g):
        //
        // iwc.setRAMBufferSizeMB(256.0);

        writer = new IndexWriter(dir, iwc);
        indexDocs(writer, docDir);

        // NOTE: if you want to maximize search performance,
        // you can optionally call forceMerge here. This can be
        // a terribly costly operation, so generally it's only
        // worth it when your index is relatively static (i.e.
        // you're done adding documents to it):
        //
        // writer.forceMerge(1);

        writer.close();
    } catch (IOException e) {
        LOGGER.warn("Exception while indexing", e);
    } finally {
        if (writer != null) {
            try {
                writer.close();
            } catch (IOException ex) {
                java.util.logging.Logger.getLogger(DefaultLuceneController.class.getName()).log(Level.SEVERE,
                        null, ex);
            }
        }
    }
}
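Note that this method closes the writer twice: once at the end of the try block and again in the finally block. In recent Lucene versions a second close() on an already-closed writer should be a harmless no-op, but the try-with-resources form shown in the sketch near the top of this page expresses the same cleanup more compactly and avoids the duplication.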
From source file:de.hybris.platform.lucenesearch.jalo.LuceneTest.java
License:Open Source License
@Before
public void setUp() throws Exception {
    directory = new RAMDirectory();
    final IndexWriterConfig indexWriterConfig = new IndexWriterConfig(Version.LUCENE_40,
            new StandardAnalyzer(Version.LUCENE_40)).setOpenMode(OpenMode.CREATE_OR_APPEND);
    final IndexWriter writer = new IndexWriter(directory, indexWriterConfig);

    docA = new Document();
    docA.add(new Field("key", "a", Field.Store.YES, Field.Index.NOT_ANALYZED));
    docA.add(new Field("text", "text zum ersten document", Field.Store.YES, Field.Index.ANALYZED));
    writer.addDocument(docA);

    docB = new Document();
    docB.add(new Field("key", "b", Field.Store.YES, Field.Index.NOT_ANALYZED));
    docB.add(new Field("text", "text zum zweiten document", Field.Store.YES, Field.Index.ANALYZED));
    writer.addDocument(docB);

    docC = new Document();
    docC.add(new Field("key", "c", Field.Store.YES, Field.Index.NOT_ANALYZED));
    docC.add(new Field("text", "text zum dritten document", Field.Store.YES, Field.Index.ANALYZED));
    writer.addDocument(docC);

    //writer.optimize();
    writer.close();
}
From source file:de.hybris.platform.lucenesearch.jalo.LuceneTest.java
License:Open Source License
@Test
public void testReindex() throws IOException {
    assertTermSearch(set(docA, docB, docC), "text");

    final IndexWriterConfig indexWriterConfig = new IndexWriterConfig(Version.LUCENE_40,
            new StandardAnalyzer(Version.LUCENE_40)).setOpenMode(OpenMode.APPEND);
    final IndexWriter changewriter = new IndexWriter(directory, indexWriterConfig);
    changewriter.deleteDocuments(new Term("key", "b"));

    final Document docB2 = new Document();
    docB2.add(new Field("key", "b", Field.Store.YES, Field.Index.NOT_ANALYZED));
    docB2.add(new Field("text", "neuer texxxt zum zweiten document", Field.Store.YES, Field.Index.ANALYZED));
    changewriter.addDocument(docB2);
    changewriter.close();

    assertTermSearch(set(docA, docB2, docC), "zum");
    assertTermSearch(set(docA, docC), "text");
    assertTermSearch(set(docB2), "texxxt");
}
From source file:de.ingrid.interfaces.csw.index.impl.IngridGeoTKLuceneIndexer.java
License:EUPL
/**
 * Removes documents identified by the given query from the index.
 *
 * @param queryString the query identifying the documents to remove
 * @throws ParseException
 */
public List<String> removeDocumentByQuery(final String queryString) throws ParseException {
    List<String> deletedRecords = new ArrayList<String>();
    try {
        final QueryParser parser = new QueryParser(Version.LUCENE_36, "anytext", analyzer);
        Query query = parser.parse(queryString);

        final IndexWriterConfig config = new IndexWriterConfig(Version.LUCENE_36, analyzer);
        final IndexWriter writer = new IndexWriter(LuceneUtils.getAppropriateDirectory(getFileDirectory()),
                config);
        LOGGER.log(logLevel, "Query:{0}", query);

        IndexReader reader = IndexReader.open(writer, false);
        IndexSearcher searcher = new IndexSearcher(reader);
        TopDocs docs = searcher.search(query, Integer.MAX_VALUE);
        for (ScoreDoc doc : docs.scoreDocs) {
            deletedRecords.add(reader.document(doc.doc).get("id"));
        }
        writer.deleteDocuments(query);
        writer.commit();
        searcher.close();
        reader.close();
        writer.close();
    } catch (CorruptIndexException ex) {
        LOGGER.log(Level.WARNING, "CorruptIndexException while indexing document: " + ex.getMessage(), ex);
    } catch (IOException ex) {
        LOGGER.log(Level.WARNING, "IOException while indexing document: " + ex.getMessage(), ex);
    }
    return deletedRecords;
}
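A detail worth noting here: IndexReader.open(writer, false) in Lucene 3.x opens a near-real-time reader on top of the writer, so the search can see documents that were added but not yet committed. The ids of the matching documents are collected from that reader before deleteDocuments removes them, and only then are the searcher, reader, and writer closed in order.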
From source file:de.ingrid.search.utils.facet.DummyIndex.java
License:EUPL
private static File createTestIndex() {
    File indexDirectory = new File("./test_index");
    if (!indexDirectory.exists()) {
        try {
            IndexWriter writer = new IndexWriter(FSDirectory.getDirectory(indexDirectory),
                    new StandardAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
            for (Object[][] doc : IndexDef) {
                Document document = new Document();
                for (Object[] fields : doc) {
                    document.add(new Field((String) fields[0], (String) fields[1], (Field.Store) fields[2],
                            (Field.Index) fields[3]));
                }
                writer.addDocument(document);
            }
            writer.close();
        } catch (CorruptIndexException e) {
            // TODO Auto-generated catch block
            e.printStackTrace();
        } catch (LockObtainFailedException e) {
            // TODO Auto-generated catch block
            e.printStackTrace();
        } catch (IOException e) {
            // TODO Auto-generated catch block
            e.printStackTrace();
        }
    }
    return indexDirectory;
}