Example usage for org.apache.lucene.index IndexWriter deleteAll

List of usage examples for org.apache.lucene.index IndexWriter deleteAll

Introduction

On this page you can find example usages of org.apache.lucene.index IndexWriter.deleteAll.

Prototype

@SuppressWarnings("try")
public long deleteAll() throws IOException 

Source Link

Document

Delete all documents in the index.

Usage

From source file:org.apache.cxf.systest.jaxrs.extraction.BookCatalog.java

License:Apache License

@DELETE
public Response delete() throws IOException {
    // Wipe every document from the catalog index.
    final IndexWriter indexWriter = getIndexWriter();
    try {
        // Remove all documents, then make the deletion durable.
        indexWriter.deleteAll();
        indexWriter.commit();
    } finally {
        // Release the writer even when deleteAll/commit fails.
        indexWriter.close();
    }
    return Response.ok().build();
}

From source file:org.eclipse.dirigible.repository.ext.indexing.LuceneMemoryIndexer.java

License:Open Source License

@Override
public void clearIndex() throws EIndexingException {
    try {
        synchronized (directory) {
            logger.debug("entering: clearIndex() : " + indexName); //$NON-NLS-1$

            // Open a fresh writer against the shared directory and purge
            // every document from the named index.
            final Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_35);
            IndexWriter writer = null;
            try {
                final IndexWriterConfig writerConfig = new IndexWriterConfig(Version.LUCENE_35, analyzer);
                writer = new IndexWriter(directory, writerConfig);
                writer.deleteAll();
            } finally {
                // Release the writer even if deleteAll failed.
                if (writer != null) {
                    writer.close();
                }
            }
            logger.debug("exiting: clearIndex() : " + indexName); //$NON-NLS-1$
        }
    } catch (Exception e) {
        // Surface any failure through the indexing-specific exception type.
        throw new EIndexingException(e);
    }
}

From source file:org.eclipse.dirigible.repository.ext.indexing.RepositoryMemoryIndexer.java

License:Open Source License

public static void clearIndex() throws IOException {
    try {
        synchronized (directory) {
            logger.debug("entering: clearIndex()"); //$NON-NLS-1$

            // Open a fresh writer over the shared directory and remove
            // every document it contains.
            final Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_35);
            IndexWriter writer = null;
            try {
                final IndexWriterConfig writerConfig = new IndexWriterConfig(Version.LUCENE_35, analyzer);
                writer = new IndexWriter(directory, writerConfig);
                writer.deleteAll();
            } finally {
                // Release the writer even if deleteAll failed.
                if (writer != null) {
                    writer.close();
                }
            }
            logger.debug("exiting: clearIndex()"); //$NON-NLS-1$
        }
    } catch (Exception e) {
        // Wrap any failure (including runtime errors) as an IOException.
        throw new IOException(e);
    }
}

From source file:org.elasticsearch.index.mapper.core.ScaledFloatFieldTypeTests.java

License:Apache License

public void testStats() throws IOException {
    // Field type under test: a scaled_float with a random scaling factor.
    ScaledFloatFieldMapper.ScaledFloatFieldType fieldType = new ScaledFloatFieldMapper.ScaledFloatFieldType();
    fieldType.setName("scaled_float");
    fieldType.setScalingFactor(0.1 + randomDouble() * 100);

    Directory directory = newDirectory();
    IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(null));

    // An empty index yields no stats.
    try (DirectoryReader reader = DirectoryReader.open(writer)) {
        assertNull(fieldType.stats(reader));
    }

    // Index two documents carrying raw long values -1 and 10.
    Document document = new Document();
    LongPoint longPoint = new LongPoint("scaled_float", -1);
    document.add(longPoint);
    writer.addDocument(document);
    longPoint.setLongValue(10);
    writer.addDocument(document);

    // Stats report unscaled values: raw value divided by the scaling factor.
    try (DirectoryReader reader = DirectoryReader.open(writer)) {
        FieldStats<?> stats = fieldType.stats(reader);
        assertEquals(-1 / fieldType.getScalingFactor(), stats.getMinValue());
        assertEquals(10 / fieldType.getScalingFactor(), stats.getMaxValue());
        assertEquals(2, stats.getMaxDoc());
    }

    // After deleting everything the stats disappear again.
    writer.deleteAll();
    try (DirectoryReader reader = DirectoryReader.open(writer)) {
        assertNull(fieldType.stats(reader));
    }
    IOUtils.close(writer, directory);
}

From source file:org.jabylon.index.properties.jobs.impl.ReorgIndexJob.java

License:Open Source License

/**
 * Rebuilds the search index from scratch: deletes all existing documents,
 * then re-indexes the whole workspace (95% of the progress budget) and the
 * TMX data (remaining 5%), committing on success.
 *
 * Fix: the OutOfMemoryError branch previously called writer.close() without
 * a null check — if the OOM occurred before obtainIndexWriter() returned,
 * it threw an NPE that masked the intended recovery.
 */
public static void indexWorkspace(RepositoryConnector connector, IProgressMonitor monitor)
        throws CorruptIndexException, IOException {
    long time = System.currentTimeMillis();
    logger.info("Reorg of search index started");
    IndexWriter writer = null;
    CDONet4jSession session = null;
    SubMonitor submon = SubMonitor.convert(monitor, 100);
    try {
        writer = IndexActivator.getDefault().obtainIndexWriter();
        writer.deleteAll();
        session = connector.createSession();
        CDOView view = connector.openView(session);
        CDOResource resource = view.getResource(ServerConstants.WORKSPACE_RESOURCE);
        Workspace workspace = (Workspace) resource.getContents().get(0);
        indexWorkspace(workspace, writer, submon.newChild(95));
        indexTMX(writer, submon.newChild(5));
        writer.commit();
    } catch (OutOfMemoryError error) {
        logger.error("Out of memory during index reorg", error);
        // As suggested by lucene documentation: close the writer after OOM.
        // Guard against null — the OOM may have occurred before the writer
        // was obtained.
        if (writer != null)
            writer.close();
    } catch (Exception e) {
        logger.error("Exception during index reorg. Rolling back", e);
        if (writer != null)
            writer.rollback();
        throw new IllegalStateException("Failed to write index", e);
    } finally {
        if (monitor != null)
            monitor.done();
        if (session != null) {
            session.close();
        }
        IndexActivator.getDefault().returnIndexWriter(writer);
    }
    long duration = (System.currentTimeMillis() - time) / 1000;
    logger.info("Search Index Reorg finished. Took {} seconds", duration);
}

From source file:org.niord.core.message.MessageLuceneIndex.java

License:Apache License

/**
 * Deletes the current index/*from   w ww .  j  a  v a2s .  c om*/
 */
private void deleteIndex() throws IOException {
    // Delete the index
    IndexWriter writer = null;
    try {
        writer = getNewWriter();
        writer.deleteAll();
        writer.setCommitData(new HashMap<>());
        writer.commit();
    } finally {
        closeWriter(writer);
    }
}

From source file:org.rapidpm.modul.javafx.searchbox.searchbox.SearchBox.java

License:Apache License

/**
 * Rebuilds the search index from the given items under the semaphore,
 * then swaps in a fresh IndexSearcher over the rebuilt index.
 *
 * Fixes over the original:
 * - sem.release() was in a finally that also covered sem.acquire(), so an
 *   interrupted acquire still released a permit it never held, leaking a
 *   permit into the semaphore.
 * - InterruptedException was logged without restoring the thread's
 *   interrupt flag.
 * - The IndexWriter leaked if any step between construction and close threw.
 */
public void refreshIndex(final List<T> itemListe) {
    try {
        sem.acquire();
    } catch (InterruptedException e) {
        // Restore the interrupt flag and bail out without touching the index.
        Thread.currentThread().interrupt();
        logger.error(e);
        return;
    }
    try {
        this.itemListe = itemListe;
        final IndexWriter writer = new IndexWriter(idx, new IndexWriterConfig(Version.LUCENE_43, analyzer));
        try {
            // Rebuild the index and the table values from scratch.
            writer.deleteAll();
            tablevalues.clear();
            for (final T item : itemListe) {
                addElementToIndex(writer, item);
            }
        } finally {
            writer.close();
        }
        final DirectoryReader reader = DirectoryReader.open(idx);
        indexSearcher = new IndexSearcher(reader);
    } catch (IOException e) {
        logger.error(e);
    } finally {
        // Only released when the permit was actually acquired.
        sem.release();
    }
}

From source file:org.segrada.search.lucene.LuceneSearchEngine.java

License:Apache License

@Override
public synchronized void clearAllIndexes() {
    try {
        // init index writer config
        IndexWriterConfig indexWriterConfig = new IndexWriterConfig(Version.LUCENE_47, this.analyzer);

        // create new index writer
        IndexWriter iWriter = new IndexWriter(directory, indexWriterConfig);
        try {
            iWriter.deleteAll();
        } finally {
            // Fix: close the writer even when deleteAll throws; the original
            // leaked the writer (and its directory lock) on failure.
            iWriter.close();
        }
    } catch (Exception e) {
        logger.warn("Error while deleting all entries", e);
    }
}

From source file:org.splevo.vpm.analyzer.semantic.lucene.Indexer.java

License:Open Source License

/**
 * Deletes all contents from the main index.
 *
 * @throws IOException
 *             Failed to clear index.
 */
public void clearIndex() throws IOException {
    IndexWriter indexWriter = createIndexWriter();
    try {
        indexWriter.deleteAll();
    } finally {
        // Fix: close the writer even when deleteAll throws; the original
        // leaked the writer (and its directory lock) on failure.
        indexWriter.close();
    }
    // Drop the backing directory and start over with a fresh in-memory one.
    directory.close();
    directory = new RAMDirectory();
}

From source file:org.talend.dataquality.semantic.broadcast.BroadcastIndexObjectTest.java

License:Open Source License

/**
 * Builds a small on-disk index (including a document that is deleted but
 * not yet merged away), wraps it in a BroadcastIndexObject, and verifies
 * that the broadcast RAM copy contains exactly the live documents.
 *
 * Fixes over the original: IOExceptions were caught and printed
 * (with a leftover auto-generated TODO), so I/O failures during setup or
 * verification let the test pass silently. The method already declares
 * throws IOException, so exceptions now propagate and fail the test.
 * The writer is also closed via finally so it is released on failure.
 */
@Test
public void testCreateAndGet() throws URISyntaxException, IOException {
    // init a local index
    final File testFolder = new File("target/broadcast");
    if (testFolder.exists()) {
        FileUtils.deleteDirectory(testFolder);
    }
    FSDirectory testDir = FSDirectory.open(testFolder);
    IndexWriter writer = new IndexWriter(testDir,
            new IndexWriterConfig(Version.LATEST, new StandardAnalyzer(CharArraySet.EMPTY_SET)));
    try {
        if (writer.maxDoc() > 0) {
            writer.deleteAll();
            writer.commit();
        }
        for (String key : TEST_INDEX_CONTENT.keySet()) {
            Document doc = DictionaryUtils.generateDocument("TEST", key, key,
                    new HashSet<>(Arrays.asList(TEST_INDEX_CONTENT.get(key))));
            writer.addDocument(doc);
        }

        // here we add an extra document and remove it later.
        Document doc = DictionaryUtils.generateDocument("TEST", "DE_LAND", "DE_LAND",
                new HashSet<>(Arrays.asList(new String[] { "Bayern" })));
        writer.addDocument(doc);
        writer.commit();

        // when a document is deleted from lucene index, it's marked as deleted, but not physically deleted.
        // we need to assure that it's not propagated to Spark cluster
        writer.deleteDocuments(new Term(DictionarySearcher.F_CATID, "DE_LAND"));
        writer.commit();
    } finally {
        writer.close();
    }

    // create the broadcast object from local index
    final Directory cpDir = ClassPathDirectory.open(testFolder.toURI());
    final BroadcastIndexObject bio = new BroadcastIndexObject(cpDir, true);
    // get the RamDirectory from BroadcastIndexObject
    final Directory ramDir = bio.get();

    // assertions — both readers must see only the live documents, and the
    // broadcast copy must match the source index word for word.
    DirectoryReader cpDirReader = DirectoryReader.open(cpDir);
    assertEquals("Unexpected document count in created index. ", TEST_INDEX_CONTENT.size(),
            cpDirReader.numDocs());
    DirectoryReader ramDirReader = DirectoryReader.open(ramDir);
    assertEquals("Unexpected document count in created index. ", TEST_INDEX_CONTENT.size(),
            ramDirReader.numDocs());
    for (int i = 0; i < TEST_INDEX_CONTENT.size(); i++) {
        Document doc = cpDirReader.document(i);
        String cpWord = doc.getField(DictionarySearcher.F_CATID).stringValue();
        Document ramDoc = ramDirReader.document(i);
        String ramWord = ramDoc.getField(DictionarySearcher.F_CATID).stringValue();
        assertEquals("Unexpected word", cpWord, ramWord);
    }
}