List of usage examples for org.apache.lucene.index.IndexWriter.commit()
@Override public final long commit() throws IOException
Commits all pending changes (added and deleted documents, segment merges, added indexes, etc.) to the index, and syncs all referenced index files, such that a reader will see the changes and the index updates will survive an OS or machine crash or power loss.
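For orientation, here is a minimal, self-contained sketch (not taken from any of the sources below) of the add → commit → read cycle the collected examples share. It mirrors the first example's choice of StandardAnalyzer and RAMDirectory; note that RAMDirectory is deprecated in recent Lucene releases.

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.StringField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.store.RAMDirectory;

public class CommitExample {
    public static void main(String[] args) throws Exception {
        RAMDirectory directory = new RAMDirectory(); // deprecated in newer Lucene; ByteBuffersDirectory replaces it
        IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(new StandardAnalyzer()));

        Document doc = new Document();
        doc.add(new StringField("id", "1", Field.Store.YES));
        writer.addDocument(doc); // buffered in the writer, not yet durable

        long seqNo = writer.commit(); // syncs index files; returns a sequence number (Lucene 7+)

        // A reader opened after commit() sees the new document.
        try (DirectoryReader reader = DirectoryReader.open(directory)) {
            System.out.println("visible docs: " + reader.numDocs()); // prints 1
        }
        writer.close();
        directory.close();
    }
}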
From source file:org.hibernate.search.test.util.logging.LoggerInfoStreamTest.java
License:LGPL
@Test
public void testEnableInfoStream() throws Exception {
    LoggerInfoStream infoStream = new LoggerInfoStream();
    RAMDirectory directory = new RAMDirectory();

    IndexWriterConfig indexWriterConfig = new IndexWriterConfig(new StandardAnalyzer());
    indexWriterConfig.setInfoStream(infoStream);

    IndexWriter indexWriter = new IndexWriter(directory, indexWriterConfig);

    Document doc = new Document();
    doc.add(new StringField("f1", "value1", Field.Store.YES));

    indexWriter.addDocument(doc);
    indexWriter.commit();
    indexWriter.close();

    List<LoggingEvent> loggingEvents = testAppender
            .searchByLoggerAndMessage(LogCategory.INFOSTREAM_LOGGER_CATEGORY.getName(), "IW:");

    assertFalse(loggingEvents.isEmpty());
}
From source file:org.hip.vif.core.search.AbstractVIFIndexer.java
License:Open Source License
/** Adds the content of a single entry in a DB table to the index.
 *
 * @param inHome GeneralDomainObjectHome
 * @param inKey KeyObject
 * @throws IOException
 * @throws SQLException
 * @throws VException */
protected void addEntryToIndex(final GeneralDomainObjectHome inHome, final KeyObject inKey)
        throws IOException, VException, SQLException {
    beforeChange();
    final IndexWriter lWriter = getIndexWriter(false);
    processSelection(lWriter, inHome, inKey);
    lWriter.commit();
    afterChange();
}
From source file:org.hip.vif.core.search.AbstractVIFIndexer.java
License:Open Source License
protected void deleteEntryInIndex(final Term inTerm) throws IOException {
    final IndexWriter lWriter = getIndexWriter(false);
    lWriter.deleteDocuments(inTerm);
    lWriter.commit();
}
From source file:org.hip.vif.core.search.VIFIndexing.java
License:Open Source License
private IndexWriter createChecked(final Directory inDir) throws IOException {
    if (inDir.listAll().length == 0) {
        // Empty directory: create an initial commit so it can be opened as a valid index.
        final IndexWriter lNew = new IndexWriter(inDir, createConfiguration(false));
        lNew.commit();
        lNew.close();
    }
    return new IndexWriter(inDir, createConfiguration(false));
}
From source file:org.infinispan.lucene.cachestore.TestHelper.java
License:Open Source License
/**
 * Creates terms and inserts them into the index.
 *
 * @param rootDir index root directory.
 * @param indexName the name of the index.
 * @param termsToAdd number of terms to be added.
 * @param invert flag which identifies which terms should be inserted and which not.
 * @throws IOException
 */
public static void createIndex(File rootDir, String indexName, int termsToAdd, boolean invert)
        throws IOException {
    File indexDir = new File(rootDir, indexName);
    FSDirectory directory = FSDirectory.open(indexDir);
    try {
        CacheTestSupport.initializeDirectory(directory);
        IndexWriter iwriter = LuceneSettings.openWriter(directory, 100000);
        try {
            for (int i = 0; i <= termsToAdd; i++) {
                Document doc = new Document();
                String term = String.valueOf(i);
                // For even values of i we add to "main" field
                if (i % 2 == 0 ^ invert) {
                    doc.add(new Field("main", term, Field.Store.NO, Field.Index.NOT_ANALYZED));
                } else {
                    doc.add(new Field("secondaryField", term, Field.Store.YES, Field.Index.NOT_ANALYZED));
                }
                iwriter.addDocument(doc);
            }
            iwriter.commit();
        } finally {
            iwriter.close();
        }
    } finally {
        directory.close();
    }
}
From source file:org.jabylon.index.properties.impl.PropertyIndex.java
License:Open Source License
@Override
protected IStatus run(IProgressMonitor monitor) {
    IndexWriter writer = null;
    try {
        writer = IndexActivator.getDefault().obtainIndexWriter();
        while (true) {
            DocumentTuple documentTuple = writes.poll(20, TimeUnit.SECONDS);
            if (documentTuple == null)
                break;
            List<Document> documents = documentTuple.getDocuments();
            switch (documentTuple.getAction()) {
            case CREATE:
                for (Document document : documents) {
                    writer.addDocument(document);
                }
                break;
            case DELETE:
                StringBuilder builder = new StringBuilder();
                CDOIDUtil.write(builder, documentTuple.getDescriptor().cdoID());
                writer.deleteDocuments(new Term(QueryService.FIELD_CDO_ID, builder.toString()));
                break;
            case REPLACE:
                writer.deleteDocuments(
                        new Term(QueryService.FIELD_FULL_PATH, documentTuple.getDescriptor().fullPath().toString()));
                for (Document document : documents) {
                    writer.addDocument(document);
                }
                break;
            default:
                break;
            }
        }
        writer.commit();
    } catch (CorruptIndexException e) {
        logger.error("Exception while indexing", e);
    } catch (LockObtainFailedException e) {
        logger.error("Exception while indexing", e);
    } catch (IOException e) {
        logger.error("Exception while indexing", e);
    } catch (InterruptedException e) {
        logger.warn("Interrupted while waiting for new index events", e);
    } finally {
        try {
            IndexActivator.getDefault().returnIndexWriter(writer);
        } catch (CorruptIndexException e) {
            logger.error("Exception while closing index writer", e);
        } catch (IOException e) {
            logger.error("Exception while closing index writer", e);
        }
    }
    return Status.OK_STATUS;
}
From source file:org.jabylon.index.properties.jobs.impl.ReorgIndexJob.java
License:Open Source License
public static void indexWorkspace(RepositoryConnector connector, IProgressMonitor monitor)
        throws CorruptIndexException, IOException {
    long time = System.currentTimeMillis();
    logger.info("Reorg of search index started");
    IndexWriter writer = null;
    CDONet4jSession session = null;
    SubMonitor submon = SubMonitor.convert(monitor, 100);
    try {
        writer = IndexActivator.getDefault().obtainIndexWriter();
        writer.deleteAll();
        session = connector.createSession();
        CDOView view = connector.openView(session);
        CDOResource resource = view.getResource(ServerConstants.WORKSPACE_RESOURCE);
        Workspace workspace = (Workspace) resource.getContents().get(0);
        indexWorkspace(workspace, writer, submon.newChild(95));
        indexTMX(writer, submon.newChild(5));
        writer.commit();
    } catch (OutOfMemoryError error) {
        logger.error("Out of memory during index reorg", error);
        // As suggested by the Lucene documentation
        writer.close();
    } catch (Exception e) {
        logger.error("Exception during index reorg. Rolling back", e);
        if (writer != null)
            writer.rollback();
        throw new IllegalStateException("Failed to write index", e);
    } finally {
        if (monitor != null)
            monitor.done();
        if (session != null) {
            session.close();
        }
        IndexActivator.getDefault().returnIndexWriter(writer);
    }
    long duration = (System.currentTimeMillis() - time) / 1000;
    logger.info("Search Index Reorg finished. Took {} seconds", duration);
}
From source file:org.janusgraph.diskstorage.lucene.LuceneIndex.java
License:Apache License
private void mutateStores(Map.Entry<String, Map<String, IndexMutation>> stores,
        KeyInformation.IndexRetriever information) throws IOException, BackendException {
    IndexReader reader = null;
    try {
        final String storeName = stores.getKey();
        final IndexWriter writer = getWriter(storeName, information);
        reader = DirectoryReader.open(writer, true, true);
        final IndexSearcher searcher = new IndexSearcher(reader);
        for (final Map.Entry<String, IndexMutation> entry : stores.getValue().entrySet()) {
            final String documentId = entry.getKey();
            final IndexMutation mutation = entry.getValue();

            if (mutation.isDeleted()) {
                if (log.isTraceEnabled())
                    log.trace("Deleted entire document [{}]", documentId);
                writer.deleteDocuments(new Term(DOCID, documentId));
                continue;
            }

            final Pair<Document, Map<String, Shape>> docAndGeo = retrieveOrCreate(documentId, searcher);
            final Document doc = docAndGeo.getKey();
            final Map<String, Shape> geoFields = docAndGeo.getValue();

            Preconditions.checkNotNull(doc);
            for (final IndexEntry del : mutation.getDeletions()) {
                Preconditions.checkArgument(!del.hasMetaData(),
                        "Lucene index does not support indexing meta data: %s", del);
                final String key = del.field;
                if (doc.getField(key) != null) {
                    if (log.isTraceEnabled())
                        log.trace("Removing field [{}] on document [{}]", key, documentId);
                    doc.removeFields(key);
                    geoFields.remove(key);
                }
            }

            addToDocument(storeName, documentId, doc, mutation.getAdditions(), geoFields, information);

            // write the old document to the index with the modifications
            writer.updateDocument(new Term(DOCID, documentId), doc);
        }
        writer.commit();
    } finally {
        IOUtils.closeQuietly(reader);
    }
}
From source file:org.janusgraph.diskstorage.lucene.LuceneIndex.java
License:Apache License
@Override
public void restore(Map<String, Map<String, List<IndexEntry>>> documents,
        KeyInformation.IndexRetriever information, BaseTransaction tx) throws BackendException {
    writerLock.lock();
    try {
        for (final Map.Entry<String, Map<String, List<IndexEntry>>> stores : documents.entrySet()) {
            final String store = stores.getKey();
            final IndexWriter writer = getWriter(store, information);
            final IndexReader reader = DirectoryReader.open(writer, true, true);
            final IndexSearcher searcher = new IndexSearcher(reader);

            for (final Map.Entry<String, List<IndexEntry>> entry : stores.getValue().entrySet()) {
                final String docID = entry.getKey();
                final List<IndexEntry> content = entry.getValue();

                if (content == null || content.isEmpty()) {
                    if (log.isTraceEnabled())
                        log.trace("Deleting document [{}]", docID);
                    writer.deleteDocuments(new Term(DOCID, docID));
                    continue;
                }

                final Pair<Document, Map<String, Shape>> docAndGeo = retrieveOrCreate(docID, searcher);
                addToDocument(store, docID, docAndGeo.getKey(), content, docAndGeo.getValue(), information);

                // write the old document to the index with the modifications
                writer.updateDocument(new Term(DOCID, docID), docAndGeo.getKey());
            }
            writer.commit();
        }
        tx.commit();
    } catch (final IOException e) {
        throw new TemporaryBackendException("Could not update Lucene index", e);
    } finally {
        writerLock.unlock();
    }
}
From source file:org.moxie.proxy.LuceneExecutor.java
License:Apache License
/**
 * This completely indexes the repository and will destroy any existing
 * index.
 *
 * @param repository
 * @return IndexResult
 */
public IndexResult reindex(String repository) {
    IndexResult result = new IndexResult();
    if (!deleteIndex(repository)) {
        return result;
    }
    try {
        MoxieCache moxieCache = config.getMoxieCache();
        IMavenCache repositoryCache = config.getMavenCache(repository);
        Collection<File> files = repositoryCache.getFiles("." + org.moxie.Constants.POM);
        IndexWriter writer = getIndexWriter(repository);
        for (File pomFile : files) {
            try {
                Pom pom = PomReader.readPom(moxieCache, pomFile);
                String date = DateTools.timeToString(pomFile.lastModified(), Resolution.MINUTE);

                Document doc = new Document();
                doc.add(new Field(FIELD_PACKAGING, pom.packaging, Store.YES, Index.NOT_ANALYZED_NO_NORMS));
                doc.add(new Field(FIELD_GROUPID, pom.groupId, Store.YES, Index.ANALYZED));
                doc.add(new Field(FIELD_ARTIFACTID, pom.artifactId, Store.YES, Index.ANALYZED));
                doc.add(new Field(FIELD_VERSION, pom.version, Store.YES, Index.ANALYZED));
                if (!StringUtils.isEmpty(pom.name)) {
                    doc.add(new Field(FIELD_NAME, pom.name, Store.YES, Index.ANALYZED));
                }
                if (!StringUtils.isEmpty(pom.description)) {
                    doc.add(new Field(FIELD_DESCRIPTION, pom.description, Store.YES, Index.ANALYZED));
                }
                doc.add(new Field(FIELD_DATE, date, Store.YES, Index.ANALYZED));

                // add the pom to the index
                writer.addDocument(doc);
            } catch (Exception e) {
                logger.log(Level.SEVERE,
                        MessageFormat.format("Exception while reindexing {0} in {1}", pomFile, repository), e);
            }
            result.artifactCount++;
        }
        writer.commit();
        resetIndexSearcher(repository);
        result.success();
    } catch (Exception e) {
        logger.log(Level.SEVERE, "Exception while reindexing " + repository, e);
    }
    return result;
}