Example usage for org.apache.lucene.index IndexWriter commit

List of usage examples for org.apache.lucene.index IndexWriter commit

Introduction

On this page you can find example usage of org.apache.lucene.index IndexWriter commit.

Prototype

@Override
public final long commit() throws IOException 

Document

Commits all pending changes (added and deleted documents, segment merges, added indexes, etc.) to the index, and syncs all referenced index files, such that a reader will see the changes and the index updates will survive an OS or machine crash or power loss.
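
A minimal sketch of a typical add-then-commit cycle (assuming Lucene 7+, where commit() returns the sequence number of the last committed operation; the index path and field name below are illustrative, not drawn from the examples that follow):

import java.io.IOException;
import java.nio.file.Paths;

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.StringField;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;

public class CommitSketch {
    public static void main(String[] args) throws IOException {
        // try-with-resources closes the writer and directory even if commit fails
        try (Directory dir = FSDirectory.open(Paths.get("example-index"));
                IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(new StandardAnalyzer()))) {
            Document doc = new Document();
            doc.add(new StringField("id", "1", Field.Store.YES));
            writer.addDocument(doc);
            // flush pending changes and fsync the index files; readers opened
            // after this point will see the new document
            long seqNo = writer.commit();
        }
    }
}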

Usage

From source file:de.elbe5.cms.search.SearchBean.java

License:Open Source License

protected void indexUser(IndexWriter writer, int id) throws Exception {
    Connection con = getConnection();
    PreparedStatement pst = null;
    try {
        pst = con.prepareStatement(INDEX_USER_SQL);
        pst.setInt(1, id);
        ResultSet rs = pst.executeQuery();
        while (rs.next()) {
            SearchData data = getUserSearchData(rs);
            writer.addDocument(data.getDoc());
        }
        rs.close();
        writer.commit();
        Log.log("finished indexing user");
    } catch (SQLException se) {
        se.printStackTrace();
    } finally {
        closeStatement(pst);
        closeConnection(con);
    }
}

From source file:de.ingrid.interfaces.csw.index.impl.IngridGeoTKLuceneIndexer.java

License:EUPL

/**
 * Removes the documents identified by the given query from the index.
 *
 * @param queryString the query identifying the documents to remove
 * @return the ids of the deleted records
 * @throws ParseException
 */
public List<String> removeDocumentByQuery(final String queryString) throws ParseException {
    List<String> deletedRecords = new ArrayList<String>();
    try {
        final QueryParser parser = new QueryParser(Version.LUCENE_36, "anytext", analyzer);

        Query query = parser.parse(queryString);

        final IndexWriterConfig config = new IndexWriterConfig(Version.LUCENE_36, analyzer);
        final IndexWriter writer = new IndexWriter(LuceneUtils.getAppropriateDirectory(getFileDirectory()),
                config);

        LOGGER.log(logLevel, "Query:{0}", query);

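        // open a near-real-time reader on the writer so the uncommitted state is
        // searchable; it is used to record the ids of the documents about to be deleted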
        IndexReader reader = IndexReader.open(writer, false);
        IndexSearcher searcher = new IndexSearcher(reader);
        TopDocs docs = searcher.search(query, Integer.MAX_VALUE);
        for (ScoreDoc doc : docs.scoreDocs) {
            deletedRecords.add(reader.document(doc.doc).get("id"));
        }
        writer.deleteDocuments(query);

        writer.commit();
        searcher.close();
        reader.close();
        writer.close();

    } catch (CorruptIndexException ex) {
        LOGGER.log(Level.WARNING, "CorruptIndexException while indexing document: " + ex.getMessage(), ex);
    } catch (IOException ex) {
        LOGGER.log(Level.WARNING, "IOException while indexing document: " + ex.getMessage(), ex);
    }
    return deletedRecords;
}

From source file:de.jetsli.lumeo.util.LuceneHelperTest.java

License:Apache License

@Test
public void testTermMatching() throws Exception {
    RAMDirectory dir = new RAMDirectory();
    IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(RawLucene.VERSION, new KeywordAnalyzer()));
    Document d = new Document();

    FieldType ft = Mapping.getLongFieldType(true, true);
    d.add(new LongField("id", 1234, ft));
    d.add(new LongField("tmp", 1111, ft));
    w.addDocument(d);

    d = new Document();
    d.add(new LongField("id", 1234, ft));
    d.add(new LongField("tmp", 2222, ft));
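    // updateDocument atomically deletes any document matching the term and adds the new one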
    w.updateDocument(getTerm("id", 1234), d);

    d = new Document();
    d.add(new LongField("id", 0, ft));
    w.addDocument(d);
    w.commit();

    IndexReader reader = DirectoryReader.open(w, true);
    IndexSearcher searcher = new IndexSearcher(reader);

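    // LongField values are indexed as prefix-coded terms, so the lookup value
    // must be encoded the same way before it can match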
    BytesRef bytes = new BytesRef();
    NumericUtils.longToPrefixCoded(1234, 0, bytes);
    TopDocs td = searcher.search(new TermQuery(new Term("id", bytes)), 10);
    assertEquals(1, td.totalHits);
    assertEquals(1234L, searcher.doc(td.scoreDocs[0].doc).getField("id").numericValue());
    assertEquals(2222L, searcher.doc(td.scoreDocs[0].doc).getField("tmp").numericValue());
    w.close();
}

From source file:de.tudarmstadt.lt.lm.app.GenerateNgramIndex.java

License:Apache License

public void create_ngram_index(File ngram_joined_counts_file) throws IOException {
    File index_dir = new File(_index_dir, "ngram");
    if (index_dir.exists()) {
        LOG.info("Ngram index already exists in directory '{}'.", index_dir.getAbsolutePath());
        if (_overwrite) {
            LOG.info("Overwriting index '{}'.", index_dir);
            index_dir.delete();
        } else
            return;
    }
    index_dir.mkdirs();

    Analyzer analyzer = new KeywordAnalyzer();
    IndexWriterConfig iwc = new IndexWriterConfig(Version.LUCENE_4_9, analyzer);
    iwc.setOpenMode(OpenMode.CREATE);
    // use a configurable fraction of the maximum available memory as the indexing ram buffer
    double total_mem_mb = (double) Runtime.getRuntime().maxMemory() / 1e6;
    double percentage_ram_buffer = Properties.ramBufferPercentage();
    if (percentage_ram_buffer > 0) {
        double percentage_ram_buffer_mb = total_mem_mb * percentage_ram_buffer;
        LOG.info(String.format("Setting ram buffer size to %.2f MB (%.2f%% from %.2f MB)",
                percentage_ram_buffer_mb, percentage_ram_buffer * 100, total_mem_mb));
        iwc.setRAMBufferSizeMB(percentage_ram_buffer_mb);
    }

    Directory directory = new MMapDirectory(index_dir);
    IndexWriter writer_ngram = new IndexWriter(directory, iwc);

    InputStream in = new FileInputStream(ngram_joined_counts_file);
    if (ngram_joined_counts_file.getName().endsWith(".gz"))
        in = new GZIPInputStream(in);
    LineIterator iter = new LineIterator(new BufferedReader(new InputStreamReader(in, "UTF-8")));

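    // a single Document and its Field instances are reused across all lines;
    // only the field values are overwritten before each addDocument call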
    Document doc = new Document();
    Field f_ngram = new StringField("ngram", "", Store.YES);
    doc.add(f_ngram);
    Field f_n = new IntField("cardinality", 0, Store.YES);
    doc.add(f_n);
    Field f_word = new StringField("word", "", Store.YES);
    doc.add(f_word);
    Field f_hist = new StringField("history", "", Store.YES);
    doc.add(f_hist);
    Field f_lower = new StringField("lower", "", Store.YES);
    doc.add(f_lower);
    Field f_count = new StoredField("num", 0L);
    doc.add(f_count);

    Field[] f_follow = new Field[4];
    f_follow[0] = new StoredField("nf_s", 0L);
    doc.add(f_follow[0]);
    f_follow[1] = new StoredField("nf_N1", 0L);
    doc.add(f_follow[1]);
    f_follow[2] = new StoredField("nf_N2", 0L);
    doc.add(f_follow[2]);
    f_follow[3] = new StoredField("nf_N3", 0L);
    doc.add(f_follow[3]);
    Field[] f_precede = new Field[4];
    f_precede[0] = new StoredField("np_s", 0L);
    doc.add(f_precede[0]);
    f_precede[1] = new StoredField("np_N1", 0L);
    doc.add(f_precede[1]);
    f_precede[2] = new StoredField("np_N2", 0L);
    doc.add(f_precede[2]);
    f_precede[3] = new StoredField("np_N3", 0L);
    doc.add(f_precede[3]);
    Field[] f_followerprecede = new Field[4];
    f_followerprecede[0] = new StoredField("nfp_s", 0L);
    doc.add(f_followerprecede[0]);
    f_followerprecede[1] = new StoredField("nfp_N1", 0L);
    doc.add(f_followerprecede[1]);
    f_followerprecede[2] = new StoredField("nfp_N2", 0L);
    doc.add(f_followerprecede[2]);
    f_followerprecede[3] = new StoredField("nfp_N3", 0L);
    doc.add(f_followerprecede[3]);

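    // N[n][k] counts ngrams of order n occurring exactly k times (k=5: five or more);
    // N[n][0] is the number of distinct ngrams of order n, S[n] the sum of their counts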
    Long[][] N = new Long[][] { { 0L, 0L, 0L, 0L, 0L, 0L } };
    Long[] S = new Long[] { 0L };
    long c = 0;
    while (iter.hasNext()) {
        if (++c % 100000 == 0)
            LOG.info("Adding {}th ngram.", c);
        String line = iter.next();
        try {
            String[] splits = de.tudarmstadt.lt.utilities.StringUtils.rtrim(line).split("\t");
            String ngram_str = splits[0];
            if (de.tudarmstadt.lt.utilities.StringUtils.trim(ngram_str).isEmpty()) {
                LOG.warn("Ngram is empty, skipping line {}: '{}' (file '{}').", c, line,
                        ngram_joined_counts_file);
                continue;
            }

            List<String> ngram = Arrays.asList(ngram_str.split(" "));
            long num = Long.parseLong(splits[1]);
            int n = ngram.size();

            f_ngram.setStringValue(ngram_str);
            f_n.setIntValue(n);
            f_word.setStringValue(ngram.get(ngram.size() - 1));
            f_hist.setStringValue(StringUtils.join(ngram.subList(0, ngram.size() - 1), " "));
            f_lower.setStringValue(StringUtils.join(ngram.subList(1, ngram.size()), " "));
            f_count.setLongValue(num);

            for (int j = 0; j < f_follow.length; j++) {
                f_follow[j].setLongValue(0L);
                f_precede[j].setLongValue(0L);
                f_followerprecede[j].setLongValue(0L);
            }

            // columns 3-5, if present, each hold one typed list of counts:
            // "n_f" (follower), "n_p" (preceder) or "n_fp" (follower-preceder)
            for (int k = 2; k < splits.length && k < 5; k++) {
                if (splits[k].isEmpty())
                    continue;
                String[] splits_ = splits[k].split(":");
                String type = splits_[0];
                String[] count_values = splits_[1].split(",");
                for (int i = 0; i < count_values.length; i++) {
                    if ("n_f".equals(type))
                        f_follow[i].setLongValue(Long.parseLong(count_values[i]));
                    else if ("n_p".equals(type))
                        f_precede[i].setLongValue(Long.parseLong(count_values[i]));
                    else if ("n_fp".equals(type))
                        f_followerprecede[i].setLongValue(Long.parseLong(count_values[i]));
                }
            }

            writer_ngram.addDocument(doc);

            while (N.length <= n) {
                N = ArrayUtils.getConcatinatedArray(N, new Long[][] { { 0L, 0L, 0L, 0L, 0L, 0L } });
                S = ArrayUtils.getConcatinatedArray(S, new Long[] { 0L });
            }

            if (num == 1L)
                N[n][1]++;
            else if (num == 2L)
                N[n][2]++;
            else if (num == 3L)
                N[n][3]++;
            else if (num == 4L)
                N[n][4]++;
            else
                N[n][5]++;
            N[n][0]++;
            S[n] += num;

        } catch (Exception e) {
            LOG.error("Could not process line '{}' in file '{}:{}', malformed line.", line,
                    ngram_joined_counts_file, c, e);
        }
    }

    writer_ngram.forceMergeDeletes();
    writer_ngram.commit();
    writer_ngram.close();

    StringBuilder b = new StringBuilder(String.format(
            "#%n# Number of times where an ngram occurred: %n#  at_least_once, exactly_once, exactly_twice, exactly_three_times, exactly_four_times, five_times_or_more.%n#%nmax_n=%d%nmax_c=6%n",
            N.length - 1));
    for (int n = 1; n < N.length; n++)
        b.append(String.format("n%d=%s%n", n, StringUtils.join(N[n], ',')));
    for (int n = 1; n < S.length; n++)
        b.append(String.format("s%d=%d%n", n, S[n]));
    FileUtils.writeStringToFile(new File(_index_dir, "__sum_ngrams__"), b.toString());

}

From source file:de.tudarmstadt.lt.lm.app.GenerateNgramIndex.java

License:Apache License

public void create_vocabulary_index(File vocabulary_file) throws IOException {
    File index_dir = new File(_index_dir, "vocab");
    if (index_dir.exists()) {
        LOG.info("Vocabulary index already exists in directory '{}'.", index_dir.getAbsolutePath());
        if (_overwrite) {
            LOG.info("Overwriting index '{}'.", index_dir);
            index_dir.delete();
        } else
            return;
    }
    index_dir.mkdirs();
    Analyzer analyzer = new KeywordAnalyzer();
    IndexWriterConfig iwc = new IndexWriterConfig(Version.LUCENE_4_9, analyzer);
    iwc.setOpenMode(OpenMode.CREATE);
    iwc.setRAMBufferSizeMB(1024.0);
    Directory directory = new MMapDirectory(index_dir);
    IndexWriter writer_vocab = new IndexWriter(directory, iwc);

    InputStream in = new FileInputStream(vocabulary_file);
    if (vocabulary_file.getName().endsWith(".gz"))
        in = new GZIPInputStream(in);
    LineIterator iter = new LineIterator(new BufferedReader(new InputStreamReader(in, "UTF-8")));
    Document doc = new Document();
    Field f_word = new StringField("word", "", Field.Store.YES);
    doc.add(f_word);
    long c = 0;
    while (iter.hasNext()) {
        if (++c % 10000 == 0)
            LOG.info("Adding {}th word.", c);
        String line = iter.next();
        try {
            String word = line.trim();
            f_word.setStringValue(word);
            writer_vocab.addDocument(doc);
        } catch (Exception e) {
            LOG.warn("Could not process line '{}' in file '{}', malformed line.", line, vocabulary_file, e);
        }
    }

    writer_vocab.forceMergeDeletes();
    writer_vocab.commit();
    writer_vocab.close();
}

From source file:de.unidue.inf.is.ezdl.dlservices.search.handlers.ranking.LuceneRanker.java

License:Open Source License

private void createIndex(ResultDocumentList toRank, IndexWriter indexWriter)
        throws CorruptIndexException, IOException {
    for (ResultDocument result : toRank) {
        Document document = result.getDocument();
        org.apache.lucene.document.Document d = new org.apache.lucene.document.Document();

        StringBuilder sb = new StringBuilder();

        String oid = document.getOid();

        Field.Store store = Field.Store.NO;

        Field field;
        if (!StringUtils.isEmpty(oid)) {
            field = new Field("oid", oid, Field.Store.YES, Field.Index.NO);
            d.add(field);
            String title = document.getTitle();
            if (!StringUtils.isEmpty(title)) {
                field = new Field(de.unidue.inf.is.ezdl.dlcore.data.fields.Field.TITLE.toString(), title, store,
                        Field.Index.ANALYZED);
                field.setOmitNorms(true);
                field.setBoost(2.0f);
                d.add(field);
                sb.append(title);
                sb.append(" ");
            }
            if (document instanceof TextDocument) {
                String docAbstract = ((TextDocument) document).getAbstract();
                if (!StringUtils.isEmpty(docAbstract)) {
                    field = new Field(de.unidue.inf.is.ezdl.dlcore.data.fields.Field.ABSTRACT.toString(),
                            docAbstract, store, Field.Index.ANALYZED);
                    d.add(field);
                    sb.append(docAbstract);
                    sb.append(" ");
                }
            }
            int year = document.getYear();
            if (year != 0) {
                field = new Field(de.unidue.inf.is.ezdl.dlcore.data.fields.Field.YEAR.toString(),
                        String.valueOf(year), store, Field.Index.NOT_ANALYZED);
                d.add(field);
                sb.append(" ");
                sb.append(year);
            }
            PersonList authorList = document.getAuthorList();
            if (authorList != null) {
                field = new Field(de.unidue.inf.is.ezdl.dlcore.data.fields.Field.AUTHOR.toString(),
                        authorList.toString(), store, Field.Index.ANALYZED);
                d.add(field);
                sb.append(authorList.toString());
            }
            field = new Field(de.unidue.inf.is.ezdl.dlcore.data.fields.Field.TEXT.toString(),
                    sb.toString(), store, Field.Index.ANALYZED);
            d.add(field);

            indexWriter.addDocument(d);
        }
    }
    indexWriter.commit();
}

From source file:de.uni_koeln.spinfo.maalr.lucene.core.DictionaryCreator.java

License:Apache License

void delete(LexEntry entry) throws IOException {
    IndexWriter writer = initIndexWriter();
    Term queryTerm = new Term(LexEntry.ID, entry.getId());
    writer.deleteDocuments(queryTerm);
    writer.commit();
    writer.close();
}

From source file:de.uni_koeln.spinfo.maalr.lucene.core.DictionaryLoader.java

License:Apache License

void update(LexEntry entry) throws IOException {
    IndexWriter writer = initIndexWriter();
    Term queryTerm = new Term(LexEntry.ID, entry.getId());
    writer.deleteDocuments(queryTerm);
    if (entry.getCurrent() != null) {
        List<Document> docs = createDocument(new HashSet<String>(), entry);
        for (Document document : docs) {
            writer.addDocument(document);
        }
    }
    writer.commit();
    writer.close();
    reader.close();
    reader = DirectoryReader.open(ram);
    searcher = new IndexSearcher(reader);
}

From source file:de.uni_koeln.spinfo.maalr.lucene.core.DictionaryLoader.java

License:Apache License

void delete(LexEntry entry) throws IOException {
    IndexWriter writer = initIndexWriter();
    Term queryTerm = new Term(LexEntry.ID, entry.getId());
    writer.deleteDocuments(queryTerm);
    writer.commit();
    writer.close();
    reader.close();
    reader = DirectoryReader.open(ram);
    searcher = new IndexSearcher(reader);
}

From source file:demo.jaxrs.search.server.Catalog.java

License:Apache License

@DELETE
public Response delete() throws IOException {
    final IndexWriter writer = getIndexWriter();

    try {
        storage.deleteAll();
        writer.deleteAll();
        writer.commit();
    } finally {
        writer.close();
    }

    return Response.ok().build();
}