List of usage examples for org.apache.lucene.index.IndexWriter#deleteDocuments
public long deleteDocuments(Term... terms) throws IOException
public long deleteDocuments(Query... queries) throws IOException
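Before the project-specific examples below, here is a minimal self-contained sketch of both overloads. It assumes a recent Lucene release (8+, where ByteBuffersDirectory replaces RAMDirectory); the class name and field values are illustrative and do not come from any of the source files below:

import java.io.IOException;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.StringField;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.store.ByteBuffersDirectory;
import org.apache.lucene.store.Directory;

public class DeleteDocumentsSketch {
    public static void main(String[] args) throws IOException {
        Directory dir = new ByteBuffersDirectory();
        IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(new StandardAnalyzer()));

        // index two documents keyed by an "id" field
        for (String id : new String[] { "1", "2" }) {
            Document doc = new Document();
            doc.add(new StringField("id", id, Field.Store.NO));
            writer.addDocument(doc);
        }

        // delete by exact term: marks every document containing id:1 as deleted
        writer.deleteDocuments(new Term("id", "1"));

        // delete by query: equivalent here, but works for arbitrary queries
        writer.deleteDocuments(new TermQuery(new Term("id", "2")));

        // deletes are buffered; they become visible after a commit (or NRT reopen)
        writer.commit();
        writer.close();
        dir.close();
    }
}

Note that deletes only become visible to a reader after a commit or a near-real-time reopen, which is why the tests below reopen their readers before asserting.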
From source file:org.elasticsearch.test.unit.common.lucene.uid.VersionsTests.java
License:Apache License
@Test
public void testVersions() throws Exception {
    Directory dir = newDirectory();
    IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(Lucene.VERSION, Lucene.STANDARD_ANALYZER));
    DirectoryReader directoryReader = DirectoryReader.open(writer, true);
    MatcherAssert.assertThat(Versions.loadVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")),
            equalTo(Versions.NOT_FOUND));

    Document doc = new Document();
    doc.add(new Field(UidFieldMapper.NAME, "1", UidFieldMapper.Defaults.FIELD_TYPE));
    writer.addDocument(doc);
    directoryReader = reopen(directoryReader);
    assertThat(Versions.loadVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")), equalTo(Versions.NOT_SET));
    assertThat(Versions.loadDocIdAndVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")).version,
            equalTo(Versions.NOT_SET));

    doc = new Document();
    doc.add(new Field(UidFieldMapper.NAME, "1", UidFieldMapper.Defaults.FIELD_TYPE));
    doc.add(new NumericDocValuesField(UidFieldMapper.VERSION, 1));
    writer.updateDocument(new Term(UidFieldMapper.NAME, "1"), doc);
    directoryReader = reopen(directoryReader);
    assertThat(Versions.loadVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")), equalTo(1L));
    assertThat(Versions.loadDocIdAndVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")).version, equalTo(1L));

    doc = new Document();
    Field uid = new Field(UidFieldMapper.NAME, "1", UidFieldMapper.Defaults.FIELD_TYPE);
    Field version = new NumericDocValuesField(UidFieldMapper.VERSION, 2);
    doc.add(uid);
    doc.add(version);
    writer.updateDocument(new Term(UidFieldMapper.NAME, "1"), doc);
    directoryReader = reopen(directoryReader);
    assertThat(Versions.loadVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")), equalTo(2L));
    assertThat(Versions.loadDocIdAndVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")).version, equalTo(2L));

    // test reuse of uid field
    doc = new Document();
    version.setLongValue(3);
    doc.add(uid);
    doc.add(version);
    writer.updateDocument(new Term(UidFieldMapper.NAME, "1"), doc);
    directoryReader = reopen(directoryReader);
    assertThat(Versions.loadVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")), equalTo(3L));
    assertThat(Versions.loadDocIdAndVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")).version, equalTo(3L));

    writer.deleteDocuments(new Term(UidFieldMapper.NAME, "1"));
    directoryReader = reopen(directoryReader);
    assertThat(Versions.loadVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")), equalTo(Versions.NOT_FOUND));
    assertThat(Versions.loadDocIdAndVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")), nullValue());

    directoryReader.close();
    writer.close();
    dir.close();
}
From source file:org.elasticsearch.test.unit.common.lucene.uid.VersionsTests.java
License:Apache License
@Test
public void testNestedDocuments() throws IOException {
    Directory dir = newDirectory();
    IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(Lucene.VERSION, Lucene.STANDARD_ANALYZER));

    List<Document> docs = new ArrayList<Document>();
    for (int i = 0; i < 4; ++i) {
        // Nested
        Document doc = new Document();
        doc.add(new Field(UidFieldMapper.NAME, "1", UidFieldMapper.Defaults.NESTED_FIELD_TYPE));
        docs.add(doc);
    }
    // Root
    Document doc = new Document();
    doc.add(new Field(UidFieldMapper.NAME, "1", UidFieldMapper.Defaults.FIELD_TYPE));
    NumericDocValuesField version = new NumericDocValuesField(UidFieldMapper.VERSION, 5L);
    doc.add(version);
    docs.add(doc);

    writer.updateDocuments(new Term(UidFieldMapper.NAME, "1"), docs);
    DirectoryReader directoryReader = DirectoryReader.open(writer, true);
    assertThat(Versions.loadVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")), equalTo(5L));
    assertThat(Versions.loadDocIdAndVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")).version, equalTo(5L));

    version.setLongValue(6L);
    writer.updateDocuments(new Term(UidFieldMapper.NAME, "1"), docs);
    version.setLongValue(7L);
    writer.updateDocuments(new Term(UidFieldMapper.NAME, "1"), docs);
    directoryReader = reopen(directoryReader);
    assertThat(Versions.loadVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")), equalTo(7L));
    assertThat(Versions.loadDocIdAndVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")).version, equalTo(7L));

    writer.deleteDocuments(new Term(UidFieldMapper.NAME, "1"));
    directoryReader = reopen(directoryReader);
    assertThat(Versions.loadVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")), equalTo(Versions.NOT_FOUND));
    assertThat(Versions.loadDocIdAndVersion(directoryReader, new Term(UidFieldMapper.NAME, "1")), nullValue());

    directoryReader.close();
    writer.close();
    dir.close();
}
From source file:org.elasticsearch.test.unit.index.cache.filter.FilterCacheTests.java
License:Apache License
private void verifyCache(FilterCache filterCache) throws Exception {
    Directory dir = new RAMDirectory();
    IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.VERSION, Lucene.STANDARD_ANALYZER));
    DirectoryReader reader = IndexReader.open(indexWriter, true);

    for (int i = 0; i < 100; i++) {
        Document document = new Document();
        document.add(new TextField("id", Integer.toString(i), Field.Store.YES));
        indexWriter.addDocument(document);
    }

    reader = refreshReader(reader);
    IndexSearcher searcher = new IndexSearcher(reader);
    assertThat(Lucene.count(searcher,
            new ConstantScoreQuery(filterCache.cache(new TermFilter(new Term("id", "1"))))), equalTo(1L));
    assertThat(Lucene.count(searcher, new XFilteredQuery(new MatchAllDocsQuery(),
            filterCache.cache(new TermFilter(new Term("id", "1"))))), equalTo(1L));

    indexWriter.deleteDocuments(new Term("id", "1"));
    reader = refreshReader(reader);
    searcher = new IndexSearcher(reader);
    TermFilter filter = new TermFilter(new Term("id", "1"));
    Filter cachedFilter = filterCache.cache(filter);
    long constantScoreCount = filter == cachedFilter ? 0 : 1;
    // sadly, when caching based on cacheKey with NRT, this fails, that's why we have DeletionAware one
    assertThat(Lucene.count(searcher, new ConstantScoreQuery(cachedFilter)), equalTo(constantScoreCount));
    assertThat(Lucene.count(searcher, new XConstantScoreQuery(cachedFilter)), equalTo(0L));
    assertThat(Lucene.count(searcher, new XFilteredQuery(new MatchAllDocsQuery(), cachedFilter)), equalTo(0L));

    indexWriter.close();
}
From source file:org.elasticsearch.xpack.core.security.authz.accesscontrol.DocumentSubsetReaderTests.java
License:Open Source License
@AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/32457")
public void testSearch() throws Exception {
    IndexWriter iw = new IndexWriter(directory, newIndexWriterConfig());

    Document document = new Document();
    document.add(new StringField("field", "value1", Field.Store.NO));
    iw.addDocument(document);

    document = new Document();
    document.add(new StringField("field", "value2", Field.Store.NO));
    iw.addDocument(document);

    document = new Document();
    document.add(new StringField("field", "value3", Field.Store.NO));
    iw.addDocument(document);

    document = new Document();
    document.add(new StringField("field", "value4", Field.Store.NO));
    iw.addDocument(document);

    iw.forceMerge(1);
    iw.deleteDocuments(new Term("field", "value3"));
    iw.close();
    openDirectoryReader();

    IndexSearcher indexSearcher = new IndexSearcher(DocumentSubsetReader.wrap(directoryReader, bitsetFilterCache,
            new TermQuery(new Term("field", "value1"))));
    assertThat(indexSearcher.getIndexReader().numDocs(), equalTo(1));
    TopDocs result = indexSearcher.search(new MatchAllDocsQuery(), 1);
    assertThat(result.totalHits, equalTo(1L));
    assertThat(result.scoreDocs[0].doc, equalTo(0));

    indexSearcher = new IndexSearcher(DocumentSubsetReader.wrap(directoryReader, bitsetFilterCache,
            new TermQuery(new Term("field", "value2"))));
    assertThat(indexSearcher.getIndexReader().numDocs(), equalTo(1));
    result = indexSearcher.search(new MatchAllDocsQuery(), 1);
    assertThat(result.totalHits, equalTo(1L));
    assertThat(result.scoreDocs[0].doc, equalTo(1));

    // this doc has been marked as deleted:
    indexSearcher = new IndexSearcher(DocumentSubsetReader.wrap(directoryReader, bitsetFilterCache,
            new TermQuery(new Term("field", "value3"))));
    assertThat(indexSearcher.getIndexReader().numDocs(), equalTo(0));
    result = indexSearcher.search(new MatchAllDocsQuery(), 1);
    assertThat(result.totalHits, equalTo(0L));

    indexSearcher = new IndexSearcher(DocumentSubsetReader.wrap(directoryReader, bitsetFilterCache,
            new TermQuery(new Term("field", "value4"))));
    assertThat(indexSearcher.getIndexReader().numDocs(), equalTo(1));
    result = indexSearcher.search(new MatchAllDocsQuery(), 1);
    assertThat(result.totalHits, equalTo(1L));
    assertThat(result.scoreDocs[0].doc, equalTo(3));
}
From source file:org.elasticsearch.xpack.core.security.authz.accesscontrol.DocumentSubsetReaderTests.java
License:Open Source License
/** Same test as in FieldSubsetReaderTests, test that core cache key (needed for NRT) is working */
public void testCoreCacheKey() throws Exception {
    Directory dir = newDirectory();
    IndexWriterConfig iwc = new IndexWriterConfig(null);
    iwc.setMaxBufferedDocs(100);
    iwc.setMergePolicy(NoMergePolicy.INSTANCE);
    IndexWriter iw = new IndexWriter(dir, iwc);

    // add two docs, id:0 and id:1
    Document doc = new Document();
    Field idField = new StringField("id", "", Field.Store.NO);
    doc.add(idField);
    idField.setStringValue("0");
    iw.addDocument(doc);
    idField.setStringValue("1");
    iw.addDocument(doc);

    // open reader
    DirectoryReader ir = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(iw),
            new ShardId("_index", "_na_", 0));
    ir = DocumentSubsetReader.wrap(ir, bitsetFilterCache, new MatchAllDocsQuery());
    assertEquals(2, ir.numDocs());
    assertEquals(1, ir.leaves().size());

    // delete id:0 and reopen
    iw.deleteDocuments(new Term("id", "0"));
    DirectoryReader ir2 = DirectoryReader.openIfChanged(ir);

    // we should have the same cache key as before
    assertEquals(1, ir2.numDocs());
    assertEquals(1, ir2.leaves().size());
    assertSame(ir.leaves().get(0).reader().getCoreCacheHelper().getKey(),
            ir2.leaves().get(0).reader().getCoreCacheHelper().getKey());
    // However we don't support caching on the reader cache key since we override deletes
    assertNull(ir.leaves().get(0).reader().getReaderCacheHelper());
    assertNull(ir2.leaves().get(0).reader().getReaderCacheHelper());

    TestUtil.checkReader(ir);
    IOUtils.close(ir, ir2, iw, dir);
}
From source file:org.elasticsearch.xpack.core.security.authz.accesscontrol.FieldSubsetReaderTests.java
License:Open Source License
/** test that core cache key (needed for NRT) is working */
public void testCoreCacheKey() throws Exception {
    Directory dir = newDirectory();
    IndexWriterConfig iwc = new IndexWriterConfig(null);
    iwc.setMaxBufferedDocs(100);
    iwc.setMergePolicy(NoMergePolicy.INSTANCE);
    IndexWriter iw = new IndexWriter(dir, iwc);

    // add two docs, id:0 and id:1
    Document doc = new Document();
    Field idField = new StringField("id", "", Field.Store.NO);
    doc.add(idField);
    idField.setStringValue("0");
    iw.addDocument(doc);
    idField.setStringValue("1");
    iw.addDocument(doc);

    // open reader
    DirectoryReader ir = FieldSubsetReader.wrap(DirectoryReader.open(iw),
            new CharacterRunAutomaton(Automata.makeString("id")));
    assertEquals(2, ir.numDocs());
    assertEquals(1, ir.leaves().size());

    // delete id:0 and reopen
    iw.deleteDocuments(new Term("id", "0"));
    DirectoryReader ir2 = DirectoryReader.openIfChanged(ir);

    // we should have the same cache key as before
    assertEquals(1, ir2.numDocs());
    assertEquals(1, ir2.leaves().size());
    assertSame(ir.leaves().get(0).reader().getCoreCacheHelper().getKey(),
            ir2.leaves().get(0).reader().getCoreCacheHelper().getKey());

    TestUtil.checkReader(ir);
    IOUtils.close(ir, ir2, iw, dir);
}
From source file:org.elasticsearch.xpack.core.security.authz.accesscontrol.SecurityIndexSearcherWrapperIntegrationTests.java
License:Open Source License
public void testDLS() throws Exception {
    ShardId shardId = new ShardId("_index", "_na_", 0);
    MapperService mapperService = mock(MapperService.class);
    ScriptService scriptService = mock(ScriptService.class);
    when(mapperService.documentMapper()).thenReturn(null);
    when(mapperService.simpleMatchToFullName(anyString()))
            .then(invocationOnMock -> Collections.singletonList((String) invocationOnMock.getArguments()[0]));

    ThreadContext threadContext = new ThreadContext(Settings.EMPTY);
    IndicesAccessControl.IndexAccessControl indexAccessControl = new IndicesAccessControl.IndexAccessControl(true,
            new FieldPermissions(), singleton(new BytesArray("{\"match_all\" : {}}")));
    IndexSettings indexSettings = IndexSettingsModule.newIndexSettings(shardId.getIndex(), Settings.EMPTY);
    Client client = mock(Client.class);
    when(client.settings()).thenReturn(Settings.EMPTY);
    final long nowInMillis = randomNonNegativeLong();
    QueryShardContext realQueryShardContext = new QueryShardContext(shardId.id(), indexSettings, null, null,
            mapperService, null, null, xContentRegistry(), writableRegistry(), client, null, () -> nowInMillis, null);
    QueryShardContext queryShardContext = spy(realQueryShardContext);
    IndexSettings settings = IndexSettingsModule.newIndexSettings("_index", Settings.EMPTY);
    BitsetFilterCache bitsetFilterCache = new BitsetFilterCache(settings, new BitsetFilterCache.Listener() {
        @Override
        public void onCache(ShardId shardId, Accountable accountable) {
        }

        @Override
        public void onRemoval(ShardId shardId, Accountable accountable) {
        }
    });
    XPackLicenseState licenseState = mock(XPackLicenseState.class);
    when(licenseState.isDocumentAndFieldLevelSecurityAllowed()).thenReturn(true);
    when(licenseState.isSecurityEnabled()).thenReturn(true);
    SecurityIndexSearcherWrapper wrapper = new SecurityIndexSearcherWrapper(indexSettings, s -> queryShardContext,
            bitsetFilterCache, threadContext, licenseState, scriptService) {

        @Override
        protected IndicesAccessControl getIndicesAccessControl() {
            return new IndicesAccessControl(true, singletonMap("_index", indexAccessControl));
        }
    };

    Directory directory = newDirectory();
    IndexWriter iw = new IndexWriter(directory,
            new IndexWriterConfig(new StandardAnalyzer()).setMergePolicy(NoMergePolicy.INSTANCE));

    int numValues = scaledRandomIntBetween(2, 16);
    String[] values = new String[numValues];
    for (int i = 0; i < numValues; i++) {
        values[i] = "value" + i;
    }
    int[] valuesHitCount = new int[numValues];

    int numDocs = scaledRandomIntBetween(32, 128);
    int commitAfter = scaledRandomIntBetween(1, numDocs);
    logger.info("Going to index [{}] documents with [{}] unique values and commit after [{}] documents have been indexed",
            numDocs, numValues, commitAfter);

    for (int doc = 1; doc <= numDocs; doc++) {
        int valueIndex = (numValues - 1) % doc;

        Document document = new Document();
        String id = String.valueOf(doc);
        document.add(new StringField("id", id, Field.Store.NO));
        String value = values[valueIndex];
        document.add(new StringField("field", value, Field.Store.NO));
        iw.addDocument(document);
        if (doc % 11 == 0) {
            iw.deleteDocuments(new Term("id", id));
        } else {
            if (doc % commitAfter == 0) {
                iw.commit();
            }
            valuesHitCount[valueIndex]++;
        }
    }
    iw.close();

    StringBuilder valueToHitCountOutput = new StringBuilder();
    for (int i = 0; i < numValues; i++) {
        valueToHitCountOutput.append(values[i]).append('\t').append(valuesHitCount[i]).append('\n');
    }
    logger.info("Value count matrix:\n{}", valueToHitCountOutput);

    DirectoryReader directoryReader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(directory), shardId);
    for (int i = 0; i < numValues; i++) {
        ParsedQuery parsedQuery = new ParsedQuery(new TermQuery(new Term("field", values[i])));
        doReturn(new TermQueryBuilder("field", values[i])).when(queryShardContext)
                .parseInnerQueryBuilder(any(XContentParser.class));
        when(queryShardContext.toFilter(new TermsQueryBuilder("field", values[i]))).thenReturn(parsedQuery);
        DirectoryReader wrappedDirectoryReader = wrapper.wrap(directoryReader);
        IndexSearcher indexSearcher = wrapper.wrap(new IndexSearcher(wrappedDirectoryReader));

        int expectedHitCount = valuesHitCount[i];
        logger.info("Going to verify hit count with query [{}] with expected total hits [{}]",
                parsedQuery.query(), expectedHitCount);
        TotalHitCountCollector countCollector = new TotalHitCountCollector();
        indexSearcher.search(new MatchAllDocsQuery(), countCollector);
        assertThat(countCollector.getTotalHits(), equalTo(expectedHitCount));
        assertThat(wrappedDirectoryReader.numDocs(), equalTo(expectedHitCount));
    }

    bitsetFilterCache.close();
    directoryReader.close();
    directory.close();
}
From source file:org.elasticsearch.xpack.core.security.authz.accesscontrol.SecurityIndexSearcherWrapperUnitTests.java
License:Open Source License
public void testIntersectScorerAndRoleBits() throws Exception {
    securityIndexSearcherWrapper = new SecurityIndexSearcherWrapper(indexSettings, null, null, threadContext,
            licenseState, scriptService);
    final Directory directory = newDirectory();
    IndexWriter iw = new IndexWriter(directory,
            new IndexWriterConfig(new StandardAnalyzer()).setMergePolicy(NoMergePolicy.INSTANCE));

    Document document = new Document();
    document.add(new StringField("field1", "value1", Field.Store.NO));
    document.add(new StringField("field2", "value1", Field.Store.NO));
    iw.addDocument(document);

    document = new Document();
    document.add(new StringField("field1", "value2", Field.Store.NO));
    document.add(new StringField("field2", "value1", Field.Store.NO));
    iw.addDocument(document);

    document = new Document();
    document.add(new StringField("field1", "value3", Field.Store.NO));
    document.add(new StringField("field2", "value1", Field.Store.NO));
    iw.addDocument(document);

    document = new Document();
    document.add(new StringField("field1", "value4", Field.Store.NO));
    document.add(new StringField("field2", "value1", Field.Store.NO));
    iw.addDocument(document);

    iw.commit();
    iw.deleteDocuments(new Term("field1", "value3"));
    iw.close();

    DirectoryReader directoryReader = DirectoryReader.open(directory);
    IndexSearcher searcher = new IndexSearcher(directoryReader);
    Weight weight = searcher.createNormalizedWeight(new TermQuery(new Term("field2", "value1")), false);

    LeafReaderContext leaf = directoryReader.leaves().get(0);

    SparseFixedBitSet sparseFixedBitSet = query(leaf, "field1", "value1");
    LeafCollector leafCollector = new LeafBucketCollector() {
        @Override
        public void collect(int doc, long bucket) throws IOException {
            assertThat(doc, equalTo(0));
        }
    };
    intersectScorerAndRoleBits(weight.scorer(leaf), sparseFixedBitSet, leafCollector, leaf.reader().getLiveDocs());

    sparseFixedBitSet = query(leaf, "field1", "value2");
    leafCollector = new LeafBucketCollector() {
        @Override
        public void collect(int doc, long bucket) throws IOException {
            assertThat(doc, equalTo(1));
        }
    };
    intersectScorerAndRoleBits(weight.scorer(leaf), sparseFixedBitSet, leafCollector, leaf.reader().getLiveDocs());

    sparseFixedBitSet = query(leaf, "field1", "value3");
    leafCollector = new LeafBucketCollector() {
        @Override
        public void collect(int doc, long bucket) throws IOException {
            fail("docId [" + doc + "] should have been deleted");
        }
    };
    intersectScorerAndRoleBits(weight.scorer(leaf), sparseFixedBitSet, leafCollector, leaf.reader().getLiveDocs());

    sparseFixedBitSet = query(leaf, "field1", "value4");
    leafCollector = new LeafBucketCollector() {
        @Override
        public void collect(int doc, long bucket) throws IOException {
            assertThat(doc, equalTo(3));
        }
    };
    intersectScorerAndRoleBits(weight.scorer(leaf), sparseFixedBitSet, leafCollector, leaf.reader().getLiveDocs());

    directoryReader.close();
    directory.close();
}
From source file:org.elasticsearch.xpack.core.security.authz.accesscontrol.SecurityIndexSearcherWrapperUnitTests.java
License:Open Source License
public void doTestIndexSearcherWrapper(boolean sparse, boolean deletions) throws IOException {
    Directory dir = newDirectory();
    IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(null));
    Document doc = new Document();
    StringField allowedField = new StringField("allowed", "yes", Store.NO);
    doc.add(allowedField);
    StringField fooField = new StringField("foo", "bar", Store.NO);
    doc.add(fooField);
    StringField deleteField = new StringField("delete", "no", Store.NO);
    doc.add(deleteField);
    w.addDocument(doc);
    if (deletions) {
        // add a document that matches foo:bar but will be deleted
        deleteField.setStringValue("yes");
        w.addDocument(doc);
        deleteField.setStringValue("no");
    }
    allowedField.setStringValue("no");
    w.addDocument(doc);
    if (sparse) {
        for (int i = 0; i < 1000; ++i) {
            w.addDocument(doc);
        }
        w.forceMerge(1);
    }
    w.deleteDocuments(new Term("delete", "yes"));

    IndexSettings settings = IndexSettingsModule.newIndexSettings("_index", Settings.EMPTY);
    BitsetFilterCache.Listener listener = new BitsetFilterCache.Listener() {
        @Override
        public void onCache(ShardId shardId, Accountable accountable) {
        }

        @Override
        public void onRemoval(ShardId shardId, Accountable accountable) {
        }
    };
    DirectoryReader reader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(w),
            new ShardId(indexSettings.getIndex(), 0));
    BitsetFilterCache cache = new BitsetFilterCache(settings, listener);
    Query roleQuery = new TermQuery(new Term("allowed", "yes"));
    BitSet bitSet = cache.getBitSetProducer(roleQuery).getBitSet(reader.leaves().get(0));
    if (sparse) {
        assertThat(bitSet, instanceOf(SparseFixedBitSet.class));
    } else {
        assertThat(bitSet, instanceOf(FixedBitSet.class));
    }

    DocumentSubsetDirectoryReader filteredReader = DocumentSubsetReader.wrap(reader, cache, roleQuery);
    IndexSearcher searcher = new SecurityIndexSearcherWrapper.IndexSearcherWrapper(filteredReader);

    // Searching a non-existing term will trigger a null scorer
    assertEquals(0, searcher.count(new TermQuery(new Term("non_existing_field", "non_existing_value"))));

    assertEquals(1, searcher.count(new TermQuery(new Term("foo", "bar"))));

    // make sure scorers are created only once, see #1725
    assertEquals(1, searcher.count(new CreateScorerOnceQuery(new MatchAllDocsQuery())));

    IOUtils.close(reader, w, dir);
}
From source file:org.entando.entando.aps.system.services.dataobjectsearchengine.IndexerDAO.java
License:Open Source License
/**
 * Deletes a document.
 *
 * @param name The name of the Field to use to look up the document.
 * @param value The key under which the document was indexed.
 * @throws ApsSystemException In case of error
 */
@Override
public synchronized void delete(String name, String value) throws ApsSystemException {
    try {
        IndexWriter writer = new IndexWriter(this._dir, this.getIndexWriterConfig());
        writer.deleteDocuments(new Term(name, value));
        writer.close();
    } catch (IOException e) {
        _logger.error("Error deleting document", e);
        throw new ApsSystemException("Error deleting document", e);
    }
}