Example usage for org.apache.lucene.index IndexWriter deleteDocuments

Introduction

This page collects example usages of the org.apache.lucene.index.IndexWriter deleteDocuments method from open-source projects.

Prototype

public long deleteDocuments(Query... queries) throws IOException 

Document

Deletes the document(s) matching any of the provided queries.
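
IndexWriter also provides a deleteDocuments(Term... terms) overload that deletes by exact term match; several of the examples below use it. For orientation, here is a minimal, self-contained sketch of a delete-by-query call. The index path, field name, and field value are hypothetical, and a Lucene 5.x-or-later API is assumed:

import java.io.IOException;
import java.nio.file.Paths;

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.store.FSDirectory;

public class DeleteByQueryExample {
    public static void main(String[] args) throws IOException {
        // Open (or create) an index in a hypothetical directory.
        try (FSDirectory dir = FSDirectory.open(Paths.get("/tmp/example-index"));
             IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(new StandardAnalyzer()))) {
            // Buffer a delete for every document whose "status" field equals "obsolete";
            // the documents are removed physically during a later segment merge.
            writer.deleteDocuments(new TermQuery(new Term("status", "obsolete")));
            writer.commit();
        }
    }
}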

Usage

From source file: nl.strohalm.cyclos.utils.lucene.IndexOperationRunner.java

License: Open Source License

/**
 * Removes the given entities from the index
 */
private void remove(final Class<? extends Indexable> entityType, final Long id) {
    final IndexWriter writer = getWriter(entityType);
    try {
        writer.deleteDocuments(new TermQuery(new Term("id", id.toString())));
        commit(entityType, writer);
    } catch (CorruptIndexException e) {
        handleIndexCorrupted(entityType);
    } catch (final Exception e) {
        LOG.warn("Error removing from index " + ClassHelper.getClassName(entityType) + "#" + id, e);
        rollback(entityType, writer);
    }
}

From source file: org.abstracthorizon.proximity.indexer.LuceneIndexer.java

License: Apache License

protected synchronized void doDeleteItemProperties(ItemProperties ip) throws StorageException {
    logger.debug("Deleting item from index");
    try {
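        // Legacy (pre-4.0) IndexWriter constructor: 'false' opens the existing index instead of creating a new one.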
        IndexWriter writer = new IndexWriter(indexDirectory, analyzer, false);
        writer.deleteDocuments(new Term("UID", getItemUid(ip)));
        logger.debug("Deleted item from index for UID={}", getItemUid(ip));
        dirtyItems = dirtyItems + 1;

        if (dirtyItems > dirtyItemTreshold) {
            logger.debug("Optimizing Lucene index as dirtyItemTreshold is exceeded.");
            writer.optimize();
            dirtyItems = 0;
        }
        writer.close();

    } catch (IOException ex) {
        throw new StorageException("Got IOException during deletion.", ex);
    }
}

From source file: org.apache.blur.lucene.security.IndexSearcherTest.java

License: Apache License

private void runTest(int expected, Collection<String> readAuthorizations,
        Collection<String> discoverAuthorizations, Collection<String> discoverableFields)
        throws IOException, ParseException {
    IndexWriterConfig conf = new IndexWriterConfig(Version.LUCENE_43, new StandardAnalyzer(Version.LUCENE_43));
    Directory dir = new RAMDirectory();
    {
        IndexWriter writer = new IndexWriter(dir, conf);
        writer.addDocument(getEmpty());
        writer.commit();
        writer.addDocument(getDoc(0, "(a&b)|d", null, "f1", "f2"));
        writer.addDocument(getDoc(1, "a&b&c", null, "f1", "f2"));
        writer.addDocument(getDoc(2, "a&b&c&e", "a&b&c", "f1", "f2"));
        writer.addDocument(getDoc(3, null, null, "f1", "f2"));// can't find
        writer.close(false);
    }
    DirectoryReader reader = DirectoryReader.open(dir);
    validate(expected, 2, readAuthorizations, discoverAuthorizations, discoverableFields, dir, reader);
    {
        IndexWriter writer = new IndexWriter(dir, conf);
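        // Delete-then-add re-indexes document 0; together the two calls behave like updateDocument.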
        writer.deleteDocuments(new Term("id", "0"));
        writer.addDocument(getDoc(0, "(a&b)|d", null, "f1", "f2"));
        writer.close(false);
    }
    reader = DirectoryReader.openIfChanged(reader);
    validate(expected, 3, readAuthorizations, discoverAuthorizations, discoverableFields, dir, reader);
    {
        IndexWriter writer = new IndexWriter(dir, conf);
        writer.deleteDocuments(new Term("id", "1"));
        writer.addDocument(getDoc(1, "a&b&c", null, "f1", "f2"));
        writer.close(false);
    }
    reader = DirectoryReader.openIfChanged(reader);
    validate(expected, 4, readAuthorizations, discoverAuthorizations, discoverableFields, dir, reader);
}

From source file: org.apache.blur.manager.writer.IndexImporter.java

License: Apache License

private IndexAction getIndexAction(final HdfsDirectory directory, final FileSystem fileSystem) {
    return new IndexAction() {

        @Override
        public void performMutate(IndexSearcherCloseable searcher, IndexWriter writer) throws IOException {
            LOG.info("About to import [{0}] into [{1}/{2}]", directory, _shard, _table);
            boolean emitDeletes = searcher.getIndexReader().numDocs() != 0;
            applyDeletes(directory, writer, _shard, emitDeletes);
            LOG.info("Add index [{0}] [{1}/{2}]", directory, _shard, _table);
            writer.addIndexes(directory);
            LOG.info("Removing delete markers [{0}] on [{1}/{2}]", directory, _shard, _table);
            writer.deleteDocuments(new Term(BlurConstants.DELETE_MARKER, BlurConstants.DELETE_MARKER_VALUE));
            LOG.info("Finishing import [{0}], commiting on [{1}/{2}]", directory, _shard, _table);
        }

        @Override
        public void doPreCommit(IndexSearcherCloseable indexSearcher, IndexWriter writer) throws IOException {

        }

        @Override
        public void doPostCommit(IndexWriter writer) throws IOException {
            Path path = directory.getPath();
            fileSystem.delete(new Path(path, INPROGRESS), false);
            LOG.info("Import complete on [{0}/{1}]", _shard, _table);
            writer.maybeMerge();
        }

        @Override
        public void doPreRollback(IndexWriter writer) throws IOException {
            LOG.info("Starting rollback on [{0}/{1}]", _shard, _table);
        }

        @Override
        public void doPostRollback(IndexWriter writer) throws IOException {
            LOG.info("Finished rollback on [{0}/{1}]", _shard, _table);
            Path path = directory.getPath();
            String name = path.getName();
            fileSystem.rename(path, new Path(path.getParent(), rename(name, BADROWIDS)));
        }
    };
}

From source file: org.apache.blur.manager.writer.IndexImporter.java

License: Apache License

private void applyDeletes(Directory directory, IndexWriter indexWriter, String shard, boolean emitDeletes)
        throws IOException {
    DirectoryReader reader = DirectoryReader.open(directory);
    try {
        LOG.info("Applying deletes in reader [{0}]", reader);
        CompositeReaderContext compositeReaderContext = reader.getContext();
        List<AtomicReaderContext> leaves = compositeReaderContext.leaves();
        BlurPartitioner blurPartitioner = new BlurPartitioner();
        Text key = new Text();
        int numberOfShards = _shardContext.getTableContext().getDescriptor().getShardCount();
        int shardId = ShardUtil.getShardIndex(shard);
        for (AtomicReaderContext context : leaves) {
            AtomicReader atomicReader = context.reader();
            Fields fields = atomicReader.fields();
            Terms terms = fields.terms(BlurConstants.ROW_ID);
            if (terms != null) {
                TermsEnum termsEnum = terms.iterator(null);
                BytesRef ref = null;
                while ((ref = termsEnum.next()) != null) {
                    key.set(ref.bytes, ref.offset, ref.length);
                    int partition = blurPartitioner.getPartition(key, null, numberOfShards);
                    if (shardId != partition) {
                        throw new IOException("Index is corrupted, RowIds were found in the wrong shard; partition ["
                                + partition + "] does not match shard [" + shardId
                                + "]; this can happen when rows are not hashed correctly.");
                    }
                    if (emitDeletes) {
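                        // deepCopyOf is needed because the TermsEnum reuses its BytesRef across next() calls.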
                        indexWriter.deleteDocuments(new Term(BlurConstants.ROW_ID, BytesRef.deepCopyOf(ref)));
                    }
                }
            }
        }
    } finally {
        reader.close();
    }
}

From source file: org.apache.blur.manager.writer.MutatableAction.java

License: Apache License

public void deleteRow(final String rowId) {
    _actions.add(new InternalAction() {
        @Override
        void performAction(IndexSearcherCloseable searcher, IndexWriter writer) throws IOException {
            writer.deleteDocuments(createRowId(rowId));
            _writeRowMeter.mark();
        }
    });
}

From source file: org.apache.clerezza.rdf.cris.GraphIndexer.java

License: Apache License

private void indexNamedResource(UriRef uriRef, IndexWriter writer) throws IOException {

    Term term = new Term(URI_FIELD_NAME, uriRef.getUnicodeString());
    writer.deleteDocuments(term);
    //the reindexing might be caused by the removal of a type statement

    GraphNode node = new GraphNode(uriRef, this.baseGraph);
    List<UriRef> types = new ArrayList<UriRef>();
    Lock lock = node.readLock();
    lock.lock();
    try {
        Iterator<Resource> resources = node.getObjects(RDF.type);
        while (resources.hasNext()) {
            Resource res = resources.next();
            if (res instanceof UriRef) {
                types.add((UriRef) res);
            }
        }
    } finally {
        lock.unlock();
    }
    for (UriRef type : types) {
        if (type2IndexedProperties.containsKey(type)) {
            Document doc = resourceToDocument(uriRef, type);
            doc.add(new Field(URI_FIELD_NAME, uriRef.getUnicodeString(), Field.Store.YES,
                    Field.Index.NOT_ANALYZED));
            writer.addDocument(doc);
        }
    }
}

From source file: org.apache.jackrabbit.oak.plugins.index.lucene.LuceneIndexEditor.java

License: Apache License

@Override
public Editor childNodeDeleted(String name, NodeState before) throws CommitFailedException {
    PathFilter.Result filterResult = getPathFilterResult(name);
    if (filterResult == PathFilter.Result.EXCLUDE) {
        return null;
    }

    if (!isDeleted) {
        // tree deletion is handled on the parent node
        String path = concat(getPath(), name);
        try {
            IndexWriter writer = context.getWriter();
            // Remove all index entries in the removed subtree
            writer.deleteDocuments(newPathTerm(path));
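            // also remove every descendant of the removed path via a prefix match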
            writer.deleteDocuments(new PrefixQuery(newPathTerm(path + "/")));
            this.context.indexUpdate();
        } catch (IOException e) {
            throw new CommitFailedException("Lucene", 5,
                    "Failed to remove the index entries of" + " the removed subtree " + path, e);
        }
    }

    MatcherState ms = getMatcherState(name, before);
    if (!ms.isEmpty()) {
        return new LuceneIndexEditor(this, name, ms, filterResult, true);
    }
    return null; // no need to recurse down the removed subtree
}

From source file: org.apache.maven.index.context.DefaultIndexingContext.java

License: Apache License

public synchronized void merge(Directory directory, DocumentFilter filter) throws IOException {
    final IndexSearcher s = acquireIndexSearcher();
    try {
        final IndexWriter w = getIndexWriter();
        final IndexReader directoryReader = DirectoryReader.open(directory);
        TopScoreDocCollector collector = null;
        try {
            int numDocs = directoryReader.maxDoc();

            Bits liveDocs = MultiFields.getLiveDocs(directoryReader);
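            // liveDocs is null when the reader holds no deletions; otherwise it flags the documents still live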
            for (int i = 0; i < numDocs; i++) {
                if (liveDocs != null && !liveDocs.get(i)) {
                    continue;
                }

                Document d = directoryReader.document(i);
                if (filter != null && !filter.accept(d)) {
                    continue;
                }

                String uinfo = d.get(ArtifactInfo.UINFO);
                if (uinfo != null) {
                    collector = TopScoreDocCollector.create(1);
                    s.search(new TermQuery(new Term(ArtifactInfo.UINFO, uinfo)), collector);
                    if (collector.getTotalHits() == 0) {
                        w.addDocument(IndexUtils.updateDocument(d, this, false));
                    }
                } else {
                    String deleted = d.get(ArtifactInfo.DELETED);

                    if (deleted != null) {
                        // Deleting the document would lose the history that it was deleted,
                        // so incremental updates would not work. Therefore, re-add the
                        // delete marker document as well.
                        w.deleteDocuments(new Term(ArtifactInfo.UINFO, deleted));
                        w.addDocument(d);
                    }
                }
            }

        } finally {
            directoryReader.close();
            commit();
        }

        rebuildGroups();
        Date mergedTimestamp = IndexUtils.getTimestamp(directory);

        if (getTimestamp() != null && mergedTimestamp != null && mergedTimestamp.after(getTimestamp())) {
            // we have both, keep the newest
            updateTimestamp(true, mergedTimestamp);
        } else {
            updateTimestamp(true);
        }
        optimize();
    } finally {
        releaseIndexSearcher(s);
    }
}

From source file: org.apache.maven.index.DefaultIndexerEngine.java

License: Apache License

public void remove(IndexingContext context, ArtifactContext ac) throws IOException {
    if (ac != null) {
        final String uinfo = ac.getArtifactInfo().getUinfo();

        // add artifact deletion marker
        final Document doc = new Document();

        doc.add(new Field(ArtifactInfo.DELETED, uinfo, Field.Store.YES, Field.Index.NO));
        doc.add(new Field(ArtifactInfo.LAST_MODIFIED, //
                Long.toString(System.currentTimeMillis()), Field.Store.YES, Field.Index.NO));

        IndexWriter w = context.getIndexWriter();
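        // Add the tombstone first, then delete any live documents for this artifact;
        // the tombstone preserves the deletion for incremental index updates.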
        w.addDocument(doc);
        w.deleteDocuments(new Term(ArtifactInfo.UINFO, uinfo));
        context.updateTimestamp();
    }
}