Example usage for org.apache.lucene.index IndexWriter deleteDocuments

List of usage examples for org.apache.lucene.index IndexWriter deleteDocuments

Introduction

On this page you can find example usages of org.apache.lucene.index IndexWriter deleteDocuments.

Prototype

public long deleteDocuments(Query... queries) throws IOException 

Source Link

Document

Deletes the document(s) matching any of the provided queries.

Usage

From source file:org.compass.core.lucene.engine.transaction.support.WriterHelper.java

License:Apache License

/**
 * Removes the document identified by the given resource key from the index.
 *
 * @param writer      the {@link IndexWriter} to delete through
 * @param resourceKey the key whose UID term identifies the document(s) to remove
 * @throws IOException if the underlying index operation fails
 */
public static void processDelete(IndexWriter writer, ResourceKey resourceKey) throws IOException {
    Term uidTerm = new Term(resourceKey.getUIDPath(), resourceKey.buildUID());
    writer.deleteDocuments(uidTerm);
}

From source file:org.compass.core.lucene.engine.transaction.support.WriterHelper.java

License:Apache License

/**
 * Removes every document matching the supplied query from the index.
 *
 * @param writer the {@link IndexWriter} to delete through
 * @param query  the query selecting the documents to remove
 * @throws IOException if the underlying index operation fails
 */
public static void processDelete(IndexWriter writer, Query query) throws IOException {
    writer.deleteDocuments(query);
}

From source file:org.dspace.search.DSIndexer.java

License:BSD License

/**
 * Applies a single indexing task to the writer. A delete that carries no
 * replacement document removes the matching documents by term; every other
 * case (a plain index, or a delete that supplies a new document) is expressed
 * as an atomic term-based update.
 *
 * @param writer the writer to apply the task to
 * @param action the task to execute; ignored when {@code null}
 * @throws IOException if the writer operation fails
 */
private static void executeIndexingTask(IndexWriter writer, IndexingTask action) throws IOException {
    if (action == null) {
        return;
    }
    if (action.isDelete() && action.getDocument() == null) {
        // Pure delete: no replacement document was provided.
        writer.deleteDocuments(action.getTerm());
    } else {
        // Index or delete-with-replacement: both become an atomic update.
        writer.updateDocument(action.getTerm(), action.getDocument());
    }
}

From source file:org.dspace.search.LuceneIndex.java

License:BSD License

/**
 * Writes one change for the given document key: when {@code update} is true
 * the stored document is replaced, otherwise the matching documents are
 * deleted. A writer is opened per call and always closed afterwards, and the
 * document's readers are released even if the write fails.
 *
 * @param documentKey unique key identifying the document in the index
 * @param doc         the document to store on update; its readers are closed in all cases
 * @param update      {@code true} to update/replace, {@code false} to delete
 * @throws IOException if opening the index or the write operation fails
 */
private void commit(String documentKey, Document doc, boolean update) throws IOException {
    Term keyTerm = new Term(DOCUMENT_KEY, documentKey);
    IndexWriter indexWriter = null;
    try {
        indexWriter = openIndex(false);
        if (update) {
            indexWriter.updateDocument(keyTerm, doc);
        } else {
            indexWriter.deleteDocuments(keyTerm);
        }
    } finally {
        // Release any readers held by the document before closing the writer.
        if (doc != null) {
            closeAllReaders(doc);
        }
        if (indexWriter != null) {
            try {
                indexWriter.close();
            } catch (IOException e) {
                log.error("Unable to close IndexWriter", e);
            }
        }
    }
}

From source file:org.eclipse.dirigible.repository.ext.indexing.LuceneMemoryIndexer.java

License:Open Source License

/**
 * Removes the given Lucene {@code Document} from the in-memory index, matching
 * on its {@code FIELD_ID} value, and commits the deletion.
 *
 * @param document expected to be a Lucene {@code Document}; any failure
 *                 (including a {@code ClassCastException} for other types)
 *                 is wrapped in {@link EIndexingException}
 * @throws EIndexingException if the deletion fails for any reason
 */
@Override
public void deleteDocument(Object document) throws EIndexingException {
    try {
        synchronized (directory) {

            // Bug fix: the entry/exit trace messages were copy-pasted from
            // indexDocument()/indexRepository(); they now name this method.
            logger.debug("entering: deleteDocument(Object document) : " + indexName); //$NON-NLS-1$

            Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_35);
            IndexWriterConfig config = null;
            IndexWriter iwriter = null;
            try {
                config = new IndexWriterConfig(Version.LUCENE_35, analyzer);
                iwriter = new IndexWriter(directory, config);
                // Delete by the document's unique identifier field.
                Term term = new Term(FIELD_ID, ((Document) document).get(FIELD_ID));
                iwriter.deleteDocuments(term);
                iwriter.commit();

                lastIndexed = new Date();

            } finally {
                if (iwriter != null) {
                    iwriter.close();
                }
            }
            logger.debug("exiting: deleteDocument(Object document) : " + indexName); //$NON-NLS-1$
        }
    } catch (Exception e) {
        throw new EIndexingException(e);
    }
}

From source file:org.eclipse.dltk.internal.core.index.lucene.IndexContainer.java

License:Open Source License

/**
 * Drops all index entries recorded for the given source module: its time
 * stamp entry plus every document held by any of the per-type data writers.
 * I/O failures are logged rather than propagated.
 *
 * @param sourceModule path of the source module whose entries are removed
 */
public synchronized void delete(String sourceModule) {
    Term pathTerm = new Term(IndexFields.F_PATH, sourceModule);
    try {
        // Remove the module's time stamp entry first.
        getTimestampsWriter().deleteDocuments(pathTerm);
        // Then purge the module's documents from every data writer.
        for (Map<Integer, IndexWriter> writersByType : fIndexWriters.values()) {
            for (IndexWriter dataWriter : writersByType.values()) {
                dataWriter.deleteDocuments(pathTerm);
            }
        }
    } catch (IOException e) {
        Logger.logException(e);
    }
}

From source file:org.eclipse.mylyn.internal.tasks.index.core.TaskListIndex.java

License:Open Source License

/**
 * Drains {@code reindexQueue} and reindexes every queued task: each task's
 * existing documents are deleted by handle identifier and re-added from its
 * {@code TaskData}. Makes repeated passes until the queue stays empty, so
 * tasks queued while a pass is running are also picked up. On index
 * corruption the queue is discarded and a full rebuild is scheduled instead.
 *
 * @param monitor progress monitor; reports one unit of work per indexed task
 * @throws CorruptIndexException     if the writer hits corruption mid-run
 * @throws LockObtainFailedException if the index write lock cannot be obtained
 * @throws IOException               on other index I/O failures
 * @throws CoreException             if adding a task to the index fails
 */
private void indexQueuedTasks(SubMonitor monitor)
        throws CorruptIndexException, LockObtainFailedException, IOException, CoreException {

    // Fast-path check under the queue lock; beginTask only when there is work.
    synchronized (reindexQueue) {
        if (reindexQueue.isEmpty()) {
            return;
        }

        monitor.beginTask(Messages.TaskListIndex_task_rebuilding_index, reindexQueue.size());
    }

    try {
        IndexWriter writer = null;
        try {
            Map<ITask, TaskData> workingQueue = new HashMap<ITask, TaskData>();

            // reindex tasks that are in the reindexQueue, making multiple passes so that we catch anything
            // added/changed while we were reindexing
            for (;;) {
                workingQueue.clear();

                synchronized (reindexQueue) {
                    if (reindexQueue.isEmpty()) {
                        break;
                    }
                    // move items from the reindexQueue to the temporary working queue
                    workingQueue.putAll(reindexQueue);
                    reindexQueue.keySet().removeAll(workingQueue.keySet());
                }

                // Lazily open the writer on the first pass that has work.
                if (writer == null) {
                    try {
                        writer = createIndexWriter(false);
                    } catch (CorruptIndexException e) {
                        // Index is unusable: drop queued work and rebuild from scratch.
                        rebuildIndex = true;
                        synchronized (reindexQueue) {
                            reindexQueue.clear();
                        }
                        rebuildIndexCompletely(monitor);
                        return;
                    }
                }

                monitor.setWorkRemaining(workingQueue.size());

                for (Entry<ITask, TaskData> entry : workingQueue.entrySet()) {
                    ITask task = entry.getKey();
                    TaskData taskData = entry.getValue();

                    // Delete-then-add: remove any stale documents for this task
                    // before writing the fresh ones.
                    writer.deleteDocuments(
                            new Term(FIELD_IDENTIFIER.getIndexKey(), task.getHandleIdentifier()));

                    add(writer, task, taskData);

                    monitor.worked(1);
                }
            }
        } finally {
            if (writer != null) {
                writer.close();
            }
        }
    } finally {
        monitor.done();
    }
}

From source file:org.eclipse.smila.search.lucene.index.access.DeleteDocumentsOperation.java

License:Open Source License

/**
 * {@inheritDoc}/*ww w  . j  a  v  a 2s .  c  om*/
 * 
 * @see org.eclipse.smila.search.lucene.index.access.ISynchronizedOperation#process(java.lang.Object)
 */
public Void process(final IndexWriter object) throws IndexException {
    try {
        object.deleteDocuments(_term);
    } catch (final Exception e) {
        throw new IndexException("Unable to delete documents by term [" + _term.text() + "]", e);
    }
    return null;
}

From source file:org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReaderTests.java

License:Apache License

/** Verifies that a leaf's core cache key survives an NRT reopen after a delete (needed for NRT caching). */
public void testCoreCacheKey() throws Exception {
    Directory directory = newDirectory();
    IndexWriterConfig writerConfig = new IndexWriterConfig(null);
    writerConfig.setMaxBufferedDocs(100);
    writerConfig.setMergePolicy(NoMergePolicy.INSTANCE);
    IndexWriter writer = new IndexWriter(directory, writerConfig);

    // Index two documents, id:0 and id:1, into a single segment.
    Document document = new Document();
    Field id = new StringField("id", "", Field.Store.NO);
    document.add(id);
    id.setStringValue("0");
    writer.addDocument(document);
    id.setStringValue("1");
    writer.addDocument(document);

    // Open an NRT reader wrapped for a fake shard.
    ShardId shard = new ShardId(new Index("fake"), 1);
    DirectoryReader reader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(writer, true), shard);
    assertEquals(2, reader.numDocs());
    assertEquals(1, reader.leaves().size());

    // Delete id:0 and reopen the reader.
    writer.deleteDocuments(new Term("id", "0"));
    DirectoryReader reopened = DirectoryReader.openIfChanged(reader);

    // The reopened leaf must still share the same core cache key...
    assertEquals(1, reopened.numDocs());
    assertEquals(1, reopened.leaves().size());
    assertSame(reader.leaves().get(0).reader().getCoreCacheKey(),
            reopened.leaves().get(0).reader().getCoreCacheKey());

    // ...while the combined core+deletes key changes once the delete applies.
    assertNotSame(reader.leaves().get(0).reader().getCombinedCoreAndDeletesKey(),
            reopened.leaves().get(0).reader().getCombinedCoreAndDeletesKey());

    IOUtils.close(reader, reopened, writer, directory);
}

From source file:org.elasticsearch.common.lucene.index.ESDirectoryReaderTests.java

License:Apache License

/** Verifies that a leaf's core cache key is stable across an NRT reopen after a delete (needed for NRT caching). */
public void testCoreCacheKey() throws Exception {
    Directory dir = newDirectory();
    IndexWriterConfig config = new IndexWriterConfig(null);
    config.setMaxBufferedDocs(100);
    config.setMergePolicy(NoMergePolicy.INSTANCE);
    IndexWriter indexWriter = new IndexWriter(dir, config);

    // Write two documents (id:0 and id:1) sharing a single segment.
    Document doc = new Document();
    Field idField = new StringField("id", "", Field.Store.NO);
    doc.add(idField);
    idField.setStringValue("0");
    indexWriter.addDocument(doc);
    idField.setStringValue("1");
    indexWriter.addDocument(doc);

    // Open an NRT reader wrapped for a fake shard.
    ShardId shardId = new ShardId(new Index("fake"), 1);
    DirectoryReader before = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(indexWriter, true), shardId);
    assertEquals(2, before.numDocs());
    assertEquals(1, before.leaves().size());

    // Delete id:0, then reopen the reader to pick up the change.
    indexWriter.deleteDocuments(new Term("id", "0"));
    DirectoryReader after = DirectoryReader.openIfChanged(before);

    // The reopened leaf must still expose the same core cache key.
    assertEquals(1, after.numDocs());
    assertEquals(1, after.leaves().size());
    assertSame(before.leaves().get(0).reader().getCoreCacheKey(), after.leaves().get(0).reader().getCoreCacheKey());
    IOUtils.close(before, after, indexWriter, dir);
}