Example usage for org.apache.lucene.index IndexWriter deleteAll

Introduction

On this page you can find example usages of org.apache.lucene.index.IndexWriter.deleteAll, collected from open-source projects.

Prototype

@SuppressWarnings("try")
public long deleteAll() throws IOException 

Document

Delete all documents in the index.
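
Per the Lucene javadoc, the deletion only takes effect inside the writer: readers see nothing removed until commit() is called, and the change can be undone with rollback(). A minimal sketch of the basic pattern, assuming a modern Lucene release (5.x or later, where FSDirectory.open takes a java.nio.file.Path and IndexWriterConfig takes just an Analyzer); the "index" path is illustrative only:

import java.nio.file.Paths;

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;

public class DeleteAllExample {
    public static void main(String[] args) throws Exception {
        try (Directory directory = FSDirectory.open(Paths.get("index"));
                IndexWriter writer = new IndexWriter(directory,
                        new IndexWriterConfig(new StandardAnalyzer()))) {
            writer.deleteAll(); // buffered: existing readers still see the old documents
            writer.commit();    // the empty index becomes durable and visible
        }
    }
}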

Usage

From source file:org.vaadin.netbeans.maven.editor.completion.LuceneAccess.java

License:Apache License

protected void doIndex(Collection<AddOn> addons) {
    Directory directory = null;
    IndexWriter writer = null;
    Analyzer analyzer = null;
    try {
        if (!getIndexDir().exists() && !getIndexDir().mkdirs()) {
            getLogger().log(Level.WARNING, "Unable to create index directory"); // NOI18N
            return;
        }
        analyzer = new StandardAnalyzer(Version.LUCENE_35);

        directory = FSDirectory.open(getIndexDir());
        IndexWriterConfig conf = new IndexWriterConfig(Version.LUCENE_35, analyzer);
        conf.setOpenMode(OpenMode.CREATE);
        writer = new IndexWriter(directory, conf);
        writer.deleteAll();

        int i = 0;
        for (AddOn addon : addons) {
            Collection<Document> docs = createDocument(addon, i);
            for (Document document : docs) {
                writer.addDocument(document);
            }
            writer.commit();
            i++;
        }
    } catch (IOException e) {
        getLogger().log(Level.INFO, null, e);
    } finally {
        if (writer != null) {
            try {
                writer.close();
            } catch (IOException e) {
                getLogger().log(Level.FINE, null, e);
            }
        }
        if (directory != null) {
            try {
                directory.close();
            } catch (IOException e) {
                getLogger().log(Level.FINE, null, e);
            }
        }
        if (analyzer != null) {
            analyzer.close();
        }
    }
}
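
Two details are worth noting in this example: the writer is opened with OpenMode.CREATE, which already discards any existing index, so the deleteAll() call is redundant (harmless belt and braces); and since IndexWriter, Directory, and Analyzer all implement Closeable in modern Lucene, the finally-block cleanup can collapse into try-with-resources. A sketch of the same shape against a current Lucene API, reusing getIndexDir() and the logger from the example above:

IndexWriterConfig conf = new IndexWriterConfig(new StandardAnalyzer());
conf.setOpenMode(IndexWriterConfig.OpenMode.CREATE);
try (Directory directory = FSDirectory.open(getIndexDir().toPath());
        IndexWriter writer = new IndexWriter(directory, conf)) {
    writer.deleteAll(); // redundant under OpenMode.CREATE, kept for clarity
    // ... create and add documents per add-on, committing as in doIndex() ...
} catch (IOException e) {
    getLogger().log(Level.INFO, null, e);
}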

From source file:org.wso2.carbon.analytics.dataservice.core.indexing.AnalyticsDataIndexer.java

License:Open Source License

public void clearIndexDataLocal(int tenantId, String tableName) throws AnalyticsIndexException {
    String tableId = this.generateTableId(tenantId, tableName);
    IndexWriter indexWriter;
    TaxonomyWriter taxonomyWriter;
    for (int shardIndex : this.localShards) {
        try {
            indexWriter = this.lookupIndexWriter(shardIndex, tableId);
            indexWriter.deleteAll();
            indexWriter.commit();
            synchronized (this.indexTaxonomyWriters) {
                taxonomyWriter = this.lookupTaxonomyIndexWriter(shardIndex, tableId);
                taxonomyWriter.commit();
                taxonomyWriter.close();
                this.indexTaxonomyWriters.remove(this.generateShardedTableId(shardIndex, tableId));
                FileUtils.deleteDirectory(
                        new File(this.generateDirPath(shardIndex, TAXONOMY_INDEX_DATA_FS_BASE_PATH, tableId)));
            }
        } catch (IOException e) {
            throw new AnalyticsIndexException("Error in clearing index data: " + e.getMessage(), e);
        }
    }
}
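
Note the commit() directly after deleteAll(): until that call the deletion is only buffered in the writer and could still be abandoned. A sketch of that contract, where shouldProceed is a hypothetical flag and the writer is obtained as in the example above:

indexWriter.deleteAll();      // buffered only; open searchers still see every document
if (shouldProceed) {
    indexWriter.commit();     // the deletion becomes durable and visible
} else {
    indexWriter.rollback();   // discards the deleteAll and closes the writer
}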

From source file:org.wso2.carbon.analytics.dataservice.indexing.AnalyticsDataIndexer.java

License:Open Source License

private void deleteIndexData(int tenantId, String tableName, String shardId) throws AnalyticsIndexException {
    String shardedTableId = this.generateShardedTableId(tenantId, tableName, shardId);
    IndexWriter writer = this.createIndexWriter(shardedTableId);
    try {
        writer.deleteAll();
    } catch (IOException e) {
        throw new AnalyticsIndexException("Error in deleting index data: " + e.getMessage(), e);
    } finally {
        try {
            writer.close();
        } catch (IOException e) {
            log.error("Error in closing index writer: " + e.getMessage(), e);
        }
    }
}

From source file:pe.gob.mef.gescon.lucene.Indexador.java

public static void indexDirectory() {
    String filepath;
    String user;
    String password;
    String url;
    NtlmPasswordAuthentication auth;
    SmbFile dir;
    File file;
    Document doc;
    try {
        ResourceBundle bundle = ResourceBundle.getBundle(Parameters.getParameters());
        String indexDirectory = bundle.getString("indexDirectory");
        filepath = bundle.getString("filepath");
        user = bundle.getString("user");
        password = bundle.getString("password");

        Path path = Paths.get(indexDirectory);
        Directory directory = FSDirectory.open(path);
        IndexWriterConfig config = new IndexWriterConfig(new SimpleAnalyzer());
        IndexWriter indexWriter = new IndexWriter(directory, config);
        indexWriter.deleteAll();

        PreguntaService preguntaService = (PreguntaService) ServiceFinder.findBean("PreguntaService");
        List<Pregunta> listaP = preguntaService.getPreguntasActivedPosted();
        if (!CollectionUtils.isEmpty(listaP)) {
            String prefix = bundle.getString("prprefix");
            for (Pregunta p : listaP) {
                url = filepath + prefix + p.getNpreguntaid().toString() + "/" + BigDecimal.ZERO.toString()
                        + "/";
                auth = new NtlmPasswordAuthentication(null, user, password);
                dir = new SmbFile(url, auth);
                file = new File(dir.getUncPath(), FILE_NAME);

                if (file.exists()) {
                    doc = new Document();
                    doc.add(new TextField(FIELD_PATH, dir.getUncPath(), Store.YES));
                    doc.add(new TextField(FIELD_FILENAME, FILE_NAME, Store.YES));
                    doc.add(new TextField(FIELD_CODE, p.getNpreguntaid().toString(), Store.YES));
                    doc.add(new TextField(FIELD_TYPE, Constante.PREGUNTAS.toString(), Store.YES));

                    FileInputStream is = new FileInputStream(file);
                    BufferedReader reader = new BufferedReader(new InputStreamReader(is));
                    StringBuilder stringBuffer = new StringBuilder();
                    String line = null;
                    while ((line = reader.readLine()) != null) {
                        stringBuffer.append(line).append("\n");
                    }
                    reader.close();
                    doc.add(new TextField(FIELD_CONTENTS, stringBuffer.toString(), Store.YES));
                    indexWriter.addDocument(doc);
                }
            }
        }

        BaseLegalService baseLegalService = (BaseLegalService) ServiceFinder.findBean("BaseLegalService");
        List<BaseLegal> listaB = baseLegalService.getBaselegalesActivedPosted();
        if (!CollectionUtils.isEmpty(listaB)) {
            String prefix = bundle.getString("blprefix");
            for (BaseLegal b : listaB) {
                url = filepath + prefix + b.getNbaselegalid().toString() + "/" + BigDecimal.ZERO.toString()
                        + "/";
                auth = new NtlmPasswordAuthentication(null, user, password);
                dir = new SmbFile(url, auth);
                file = new File(dir.getUncPath(), FILE_NAME);

                if (file.exists()) {
                    doc = new Document();
                    doc.add(new TextField(FIELD_PATH, dir.getUncPath(), Store.YES));
                    doc.add(new TextField(FIELD_FILENAME, FILE_NAME, Store.YES));
                    doc.add(new TextField(FIELD_CODE, b.getNbaselegalid().toString(), Store.YES));
                    doc.add(new TextField(FIELD_TYPE, Constante.BASELEGAL.toString(), Store.YES));

                    FileInputStream is = new FileInputStream(file);
                    BufferedReader reader = new BufferedReader(new InputStreamReader(is));
                    StringBuilder stringBuffer = new StringBuilder();
                    String line = null;
                    while ((line = reader.readLine()) != null) {
                        stringBuffer.append(line).append("\n");
                    }
                    reader.close();
                    doc.add(new TextField(FIELD_CONTENTS, stringBuffer.toString(), Store.YES));
                    indexWriter.addDocument(doc);
                }
            }
        }

        ConocimientoService conocimientoService = (ConocimientoService) ServiceFinder
                .findBean("ConocimientoService");
        SeccionService seccionService = (SeccionService) ServiceFinder.findBean("SeccionService");
        List<Conocimiento> listaC = conocimientoService.getConocimientosActivedPublic();
        if (!CollectionUtils.isEmpty(listaC)) {
            for (Conocimiento c : listaC) {
                url = filepath + c.getVruta();
                auth = new NtlmPasswordAuthentication(null, user, password);
                dir = new SmbFile(url, auth);
                file = new File(dir.getUncPath(), FILE_NAME);

                if (file.exists()) {
                    doc = new Document();
                    doc.add(new TextField(FIELD_PATH, dir.getUncPath(), Store.YES));
                    doc.add(new TextField(FIELD_FILENAME, FILE_NAME, Store.YES));
                    doc.add(new TextField(FIELD_CODE, c.getNconocimientoid().toString(), Store.YES));
                    doc.add(new TextField(FIELD_TYPE, c.getNtipoconocimientoid().toString(), Store.YES));

                    FileInputStream is = new FileInputStream(file);
                    BufferedReader reader = new BufferedReader(new InputStreamReader(is));
                    StringBuilder stringBuffer = new StringBuilder();
                    String line = null;
                    while ((line = reader.readLine()) != null) {
                        stringBuffer.append(line).append("\n");
                    }
                    reader.close();
                    doc.add(new TextField(FIELD_CONTENTS, stringBuffer.toString(), Store.YES));
                    indexWriter.addDocument(doc);
                }

                List<Seccion> listaS = seccionService.getSeccionesByConocimiento(c.getNconocimientoid());
                if (!CollectionUtils.isEmpty(listaS)) {
                    for (Seccion s : listaS) {
                        url = filepath + s.getVruta();
                        auth = new NtlmPasswordAuthentication(null, user, password);
                        dir = new SmbFile(url, auth);
                        file = new File(dir.getUncPath(), FILE_NAME);

                        if (file.exists()) {
                            doc = new Document();
                            doc.add(new TextField(FIELD_PATH, dir.getUncPath(), Store.YES));
                            doc.add(new TextField(FIELD_FILENAME, FILE_NAME, Store.YES));
                            doc.add(new TextField(FIELD_CODE, c.getNconocimientoid().toString(), Store.YES));
                            doc.add(new TextField(FIELD_TYPE, c.getNtipoconocimientoid().toString(),
                                    Store.YES));

                            FileInputStream fis = new FileInputStream(file);
                            BufferedReader br = new BufferedReader(new InputStreamReader(fis));
                            StringBuilder sb = new StringBuilder();
                            String lines = null;
                            while ((lines = br.readLine()) != null) {
                                sb.append(lines).append("\n");
                            }
                            br.close();
                            doc.add(new TextField(FIELD_CONTENTS, sb.toString(), Store.YES));
                            indexWriter.addDocument(doc);
                        }
                    }
                }
            }
        }
        indexWriter.commit();
        indexWriter.close();
        directory.close();
    } catch (Exception e) {
        e.printStackTrace();
    }
}
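
The read-file-into-string loop above is repeated verbatim three times and is a natural candidate for a small helper. A sketch under the same imports, with readContents as a hypothetical name (Java 7+ for try-with-resources):

private static String readContents(File file) throws IOException {
    StringBuilder sb = new StringBuilder();
    try (BufferedReader reader = new BufferedReader(
            new InputStreamReader(new FileInputStream(file)))) {
        String line;
        while ((line = reader.readLine()) != null) {
            sb.append(line).append("\n");
        }
    }
    return sb.toString();
}

Each of the three blocks would then shrink to doc.add(new TextField(FIELD_CONTENTS, readContents(file), Store.YES));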

From source file:psidev.psi.mi.search.index.PsimiIndexWriter.java

License:Apache License

public void index(Directory directory, InputStream is, boolean createIndex, boolean hasHeaderLine)
        throws IOException, ConverterException, MitabLineException {
    IndexWriterConfig writerConfig = new IndexWriterConfig(Version.LUCENE_30,
            new StandardAnalyzer(Version.LUCENE_30));
    LogMergePolicy policy = new LogDocMergePolicy();
    policy.setMergeFactor(MERGE_FACTOR);
    policy.setMaxMergeDocs(Integer.MAX_VALUE);
    writerConfig.setMergePolicy(policy);

    IndexWriter indexWriter = new IndexWriter(directory, writerConfig);

    if (createIndex) {
        indexWriter.commit();
        indexWriter.deleteAll();
        indexWriter.commit();
    }

    index(indexWriter, is, hasHeaderLine);
    indexWriter.close();
}
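
The commit() before deleteAll() guarantees that a segments file exists even for a brand-new directory, and the commit() afterwards publishes the empty state before fresh documents are indexed. Readers that are already open keep their snapshot regardless, so observing the cleared index requires reopening. A sketch of that, using the DirectoryReader API of Lucene 4 and later (this example itself targets Lucene 3.0, where IndexReader.reopen() plays the same role):

DirectoryReader reader = DirectoryReader.open(directory); // snapshot taken before the clear
indexWriter.deleteAll();
indexWriter.commit();
DirectoryReader fresh = DirectoryReader.openIfChanged(reader); // null if nothing changed
if (fresh != null) {
    reader.close();
    reader = fresh; // this reader now sees zero documents
}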

From source file:test.LuceneIndexAndSearchDemo.java

License:Apache License

/**
 * Index and search demo.
 * @param args
 */
public static void main(String[] args) {
    // create the analyzer
    Analyzer analyzer = new BaseAnalyzer(true);
    Directory directory = null;
    IndexWriter iwriter = null;
    IndexReader ireader = null;
    IndexSearcher isearcher = null;
    try {
        // build an in-memory index directory
        directory = new RAMDirectory();
        // alternatively, open an on-disk directory (JDK 7+ java.nio.file):
        // directory = org.apache.lucene.store.FSDirectory.open(java.nio.file.FileSystems.getDefault().getPath("./LuceneDemo/index"));

        // configure the IndexWriter
        IndexWriterConfig iwConfig = new IndexWriterConfig(analyzer);
        iwConfig.setOpenMode(OpenMode.CREATE_OR_APPEND);
        iwriter = new IndexWriter(directory, iwConfig);
        iwriter.deleteAll(); // clear any existing documents from the index
        // index a batch of identical sample documents
        String text = "IK Analyzer???????";
        for (int i = 0; i < 10; i++) {
            Document doc = new Document();
            doc.add(new StringField("ID", (10000 + i) + "", Field.Store.YES));
            doc.add(new TextField("text", text, Field.Store.YES));
            iwriter.addDocument(doc);
        }
        if (true) {
            File dFile = new File("./web/META-INF"); // directory whose files will be indexed
            File[] files = dFile.listFiles();
            for (File file : files) {
                Document document = new Document();
                // add fields: file contents are tokenized but not stored;
                // name and path are stored with the document
                document.add(new Field("content", new FileReader(file), TextField.TYPE_NOT_STORED));
                document.add(new Field("name", file.getName(), TextField.TYPE_STORED));
                document.add(new Field("path", file.getAbsolutePath(), TextField.TYPE_STORED));
                iwriter.addDocument(document); // hand the document to the IndexWriter
            }
        }

        iwriter.close();

        // ********************* search *********************
        ireader = DirectoryReader.open(directory);
        isearcher = new IndexSearcher(ireader);
        String keyword = "?2";
        // parse the keyword into a Query with QueryParser
        QueryParser qp = new QueryParser("text", analyzer);
        qp.setDefaultOperator(QueryParser.OR_OPERATOR);
        Query query = qp.parse(keyword);
        System.out.println("Query = " + query);
        // fetch the top 5 hits
        TopDocs topDocs = isearcher.search(query, 5);
        System.out.println("Total hits: " + topDocs.totalHits);
        // print the matching documents
        ScoreDoc[] scoreDocs = topDocs.scoreDocs;
        for (int i = 0; i < scoreDocs.length; i++) {
            Document targetDoc = isearcher.doc(scoreDocs[i].doc);
            System.out.println("Matched document: " + targetDoc.toString());
        }
    } catch (CorruptIndexException e) {
        e.printStackTrace();
    } catch (LockObtainFailedException e) {
        e.printStackTrace();
    } catch (IOException e) {
        e.printStackTrace();
    } catch (ParseException e) {
        e.printStackTrace();
    } finally {
        if (ireader != null) {
            try {
                ireader.close();
            } catch (IOException e) {
                e.printStackTrace();
            }
        }
        if (directory != null) {
            try {
                directory.close();
            } catch (IOException e) {
                e.printStackTrace();
            }
        }
    }
}
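
A portability note on this demo: RAMDirectory is deprecated in Lucene 8.x and removed in 9.x. On those releases the drop-in replacement for a small in-memory index is ByteBuffersDirectory:

// Lucene 8.x+ equivalent of the RAMDirectory above
Directory directory = new ByteBuffersDirectory(); // org.apache.lucene.store.ByteBuffersDirectory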

From source file:ubic.basecode.ontology.search.OntologyIndexer.java

License:Apache License

/**
 * Create an on-disk index from an existing OntModel. Any existing index will be deleted/overwritten.
 *
 * @see <a href="http://jena.apache.org/documentation/larq/">Apache Jena LARQ</a>
 * @param  name   used to refer to this index later
 * @param  model  the ontology model whose statements will be indexed
 * @return the resulting search index
 */
@SuppressWarnings("resource")
private static synchronized SearchIndex index(String name, OntModel model) {

    File indexdir = getIndexPath(name);

    try {
        StopWatch timer = new StopWatch();
        timer.start();
        FSDirectory dir = FSDirectory.open(indexdir);
        log.info("Indexing " + name + " to: " + indexdir);

        /*
         * adjust the analyzer ...
         */
        Analyzer analyzer = new EnglishAnalyzer(Version.LUCENE_36);
        IndexWriterConfig config = new IndexWriterConfig(Version.LUCENE_36, analyzer);
        IndexWriter indexWriter = new IndexWriter(dir, config);
        indexWriter.deleteAll(); // start with clean slate.
        assert 0 == indexWriter.numDocs();

        IndexBuilderSubject larqSubjectBuilder = new IndexBuilderSubject(indexWriter);
        StmtIterator listStatements = model.listStatements(new IndexerSelector());
        larqSubjectBuilder.indexStatements(listStatements);
        indexWriter.commit();
        log.info(indexWriter.numDocs() + " Statements indexed...");
        indexWriter.close();

        Directory dirstd = indexStd(name, model);

        MultiReader r = new MultiReader(IndexReader.open(dir), IndexReader.open(dirstd));

        // workaround to get the EnglishAnalyzer.
        SearchIndex index = new SearchIndex(r, new EnglishAnalyzer(Version.LUCENE_36));
        // larqSubjectBuilder.getIndex(); // always returns a StandardAnalyzer
        assert index.getLuceneQueryParser().getAnalyzer() instanceof EnglishAnalyzer;

        log.info("Done indexing of " + name + " in " + String.format("%.2f", timer.getTime() / 1000.0) + "s");

        return index;
    } catch (IOException e) {
        throw new RuntimeException("Indexing failure for " + name, e);
    }
}

From source file:ubic.basecode.ontology.search.OntologyIndexer.java

License:Apache License

/**
 * We need to also analyze using the Standard analyzer, which doesn't do stemming and allows wildcard.
 */
@SuppressWarnings("resource")
private static Directory indexStd(String name, OntModel model) throws IOException {

    File file = getIndexPath(name + ".std");

    FSDirectory dir = FSDirectory.open(file);
    dir.getLockFactory().clearLock(dir.getLockID());
    log.info("Index to: " + file);
    Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_36);
    IndexWriterConfig config = new IndexWriterConfig(Version.LUCENE_36, analyzer);
    IndexWriter indexWriter = new IndexWriter(dir, config);
    indexWriter.deleteAll();
    IndexBuilderSubject larqSubjectBuilder = new IndexBuilderSubject(indexWriter);
    StmtIterator listStatements = model.listStatements(new IndexerSelector());
    larqSubjectBuilder.indexStatements(listStatements);
    indexWriter.commit();
    log.info(indexWriter.numDocs() + " Statements indexed...");
    indexWriter.close();
    return dir;
}
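
Both OntologyIndexer methods rely on deleteAll() being immediate from the writer's point of view, which is what makes the assert 0 == indexWriter.numDocs() check in index(...) above valid before any commit. A sketch of the distinction:

indexWriter.deleteAll();
assert indexWriter.numDocs() == 0; // the writer's view is empty at once
// a reader opened over the directory still sees the last committed state
indexWriter.commit();              // only now do newly opened readers see an empty index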

From source file:uib.scratch.WriteIndex.java

/**
 * @param args
 * @throws java.io.IOException
 * @throws org.xml.sax.SAXException
 */
public static void main(String[] args) throws IOException, SAXException {

    File docs = new File("documents");
    File indexDir = new File(INDEX_DIRECTORY);

    //Directory directory = FSDirectory.open(indexDir);

    Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_30);
    //IndexWriterConfig conf = new IndexWriterConfig(Version.LUCENE_30, analyzer);
    //IndexWriter writer = new IndexWriter(directory, analyzer, true, IndexWriter.MaxFieldLength.UNLIMITED);
    IndexWriter writer = new IndexWriter(FSDirectory.open(indexDir), analyzer, true,
            IndexWriter.MaxFieldLength.LIMITED);
    System.out.println(indexDir);
    writer.deleteAll();

    for (File file : docs.listFiles()) {
        Metadata metadata = new Metadata();
        ContentHandler handler = new BodyContentHandler();
        ParseContext context = new ParseContext();
        Parser parser = new AutoDetectParser();
        InputStream stream = new FileInputStream(file);
        try {
            parser.parse(stream, handler, metadata, context);
        } catch (TikaException e) {
            // skip files Tika cannot parse
        } catch (IOException e) {
            e.printStackTrace();
        } finally {
            stream.close();
        }

        String text = handler.toString();
        String fileName = file.getName();

        Document doc = new Document();
        doc.add(new Field("file", fileName, Store.YES, Index.NO));

        for (String key : metadata.names()) {
            String name = key.toLowerCase();
            String value = metadata.get(key);

            if (StringUtils.isEmpty(value)) {
                continue;
            }

            if ("keywords".equalsIgnoreCase(key)) {
                for (String keyword : value.split(",?(\\s+)")) {
                    doc.add(new Field(name, keyword, Store.YES, Index.NOT_ANALYZED));
                }
            } else if ("title".equalsIgnoreCase(key)) {
                doc.add(new Field(name, value, Store.YES, Index.ANALYZED));
            } else {
                doc.add(new Field(name, value, Store.YES, Index.NOT_ANALYZED));
            }
        }
        doc.add(new Field("text", text, Store.NO, Index.ANALYZED));
        writer.addDocument(doc);

    }

    writer.commit();
    // writer.deleteUnusedFiles();

    System.out.println(writer.maxDoc() + " documents written");
    writer.close();
}

From source file:uk.gov.nationalarchives.discovery.taxonomy.common.repository.lucene.LuceneTestTrainingDataSet.java

License:Mozilla Public License

public void deleteTrainingSetIndex() {
    logger.info(".deleteTrainingSetIndex");
    IndexWriter writer = null;
    try {
        writer = new IndexWriter(trainingSetDirectory, new IndexWriterConfig(trainingSetAnalyser));

        writer.deleteAll();
    } catch (IOException e) {
        throw new TaxonomyException(TaxonomyErrorType.LUCENE_IO_EXCEPTION, e);
    } finally {
        LuceneHelperTools.closeCloseableObjectQuietly(writer);
    }
}
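
LuceneHelperTools.closeCloseableObjectQuietly is project code, but the pattern it wraps is generic: close in a finally block and swallow (or merely log) the IOException so that cleanup cannot mask the primary failure. A minimal sketch of such a helper, assuming only java.io.Closeable (closeQuietly is a hypothetical name):

public static void closeQuietly(Closeable closeable) {
    if (closeable == null) {
        return;
    }
    try {
        closeable.close();
    } catch (IOException e) {
        // deliberately swallowed so cleanup cannot hide the original failure
    }
}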