Example usage for org.apache.lucene.index IndexWriter getConfig

List of usage examples for org.apache.lucene.index IndexWriter getConfig

Introduction

On this page you can find example usage for org.apache.lucene.index IndexWriter getConfig.

Prototype

public LiveIndexWriterConfig getConfig() 

Document

Returns a LiveIndexWriterConfig, which can be used to query the IndexWriter's current settings, as well as to modify "live" ones.
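
For instance, a minimal sketch (assuming a Lucene 5.x-style API; the RAMDirectory, analyzer, and buffer size are illustrative choices, not taken from the examples below) that reads a current setting and changes a "live" one on an open writer:

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.LiveIndexWriterConfig;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;

public class GetConfigSketch {
    public static void main(String[] args) throws Exception {
        Directory dir = new RAMDirectory();
        IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(new StandardAnalyzer()));

        // query the writer's current settings through the live config
        LiveIndexWriterConfig live = writer.getConfig();
        System.out.println("RAM buffer: " + live.getRAMBufferSizeMB() + " MB");

        // "live" settings such as the RAM buffer size can be changed while the writer is open
        live.setRAMBufferSizeMB(64.0);

        writer.close();
        dir.close();
    }
}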

Usage

From source file: org.elasticsearch.index.merge.Merges.java

License: Apache License

/**
 * See {@link org.apache.lucene.index.IndexWriter#forceMergeDeletes(boolean)}, with the additional
 * logic of explicitly enabling merges if the scheduler is {@link org.elasticsearch.index.merge.EnableMergeScheduler}.
 */
public static void forceMergeDeletes(IndexWriter writer, boolean doWait) throws IOException {
    MergeScheduler mergeScheduler = writer.getConfig().getMergeScheduler();
    if (mergeScheduler instanceof EnableMergeScheduler) {
        ((EnableMergeScheduler) mergeScheduler).enableMerge();
        try {
            writer.forceMergeDeletes(doWait);
        } finally {
            ((EnableMergeScheduler) mergeScheduler).disableMerge();
        }
    } else {
        writer.forceMergeDeletes(doWait);
    }
}
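
A hypothetical call site for this helper (the writer setup itself is assumed, not shown): ask Lucene to expunge deleted documents and block until the merge finishes.

    // writer is an IndexWriter whose merge scheduler is an EnableMergeScheduler;
    // merges are enabled only for the duration of this call
    Merges.forceMergeDeletes(writer, true); // true = wait for the merge to complete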

From source file: org.elasticsearch.index.percolator.PercolatorQueryCacheTests.java

License: Apache License

public void testInvalidateEntries() throws Exception {
    Directory directory = newDirectory();
    IndexWriter indexWriter = new IndexWriter(directory,
            new IndexWriterConfig(new MockAnalyzer(random())).setMergePolicy(NoMergePolicy.INSTANCE));

    storeQuery("0", indexWriter, termQuery("a", "0"), true, false);
    indexWriter.flush();
    storeQuery("1", indexWriter, termQuery("a", "1"), true, false);
    indexWriter.flush();
    storeQuery("2", indexWriter, termQuery("a", "2"), true, false);
    indexWriter.flush();

    ShardId shardId = new ShardId("_index", ClusterState.UNKNOWN_UUID, 0);
    IndexReader indexReader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(indexWriter), shardId);
    assertThat(indexReader.leaves().size(), equalTo(3));
    assertThat(indexReader.maxDoc(), equalTo(3));

    initialize("a", "type=keyword");

    IndexShard indexShard = mockIndexShard(Version.CURRENT, false);
    ThreadPool threadPool = mockThreadPool();
    IndexWarmer.Listener listener = cache.createListener(threadPool);
    listener.warmReader(indexShard, new Engine.Searcher("test", new IndexSearcher(indexReader)));
    assertThat(cache.getStats(shardId).getNumQueries(), equalTo(3L));

    PercolateQuery.QueryRegistry.Leaf leaf = cache.getQueries(indexReader.leaves().get(0));
    assertThat(leaf.getQuery(0), equalTo(new TermQuery(new Term("a", "0"))));

    leaf = cache.getQueries(indexReader.leaves().get(1));
    assertThat(leaf.getQuery(0), equalTo(new TermQuery(new Term("a", "1"))));

    leaf = cache.getQueries(indexReader.leaves().get(2));
    assertThat(leaf.getQuery(0), equalTo(new TermQuery(new Term("a", "2"))));

    // change merge policy, so that merges will actually happen:
    indexWriter.getConfig().setMergePolicy(new TieredMergePolicy());
    indexWriter.deleteDocuments(new Term("id", "1"));
    indexWriter.forceMergeDeletes();
    indexReader.close();
    indexReader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(indexWriter), shardId);
    assertThat(indexReader.leaves().size(), equalTo(2));
    assertThat(indexReader.maxDoc(), equalTo(2));
    listener.warmReader(indexShard, new Engine.Searcher("test", new IndexSearcher(indexReader)));
    assertThat(cache.getStats(shardId).getNumQueries(), equalTo(2L));

    leaf = cache.getQueries(indexReader.leaves().get(0));
    assertThat(leaf.getQuery(0), equalTo(new TermQuery(new Term("a", "0"))));

    leaf = cache.getQueries(indexReader.leaves().get(1));
    assertThat(leaf.getQuery(0), equalTo(new TermQuery(new Term("a", "2"))));

    indexWriter.forceMerge(1);
    indexReader.close();
    indexReader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(indexWriter), shardId);
    assertThat(indexReader.leaves().size(), equalTo(1));
    assertThat(indexReader.maxDoc(), equalTo(2));
    listener.warmReader(indexShard, new Engine.Searcher("test", new IndexSearcher(indexReader)));
    assertThat(cache.getStats(shardId).getNumQueries(), equalTo(2L));

    leaf = cache.getQueries(indexReader.leaves().get(0));
    assertThat(leaf.getQuery(0), equalTo(new TermQuery(new Term("a", "0"))));
    assertThat(leaf.getQuery(1), equalTo(new TermQuery(new Term("a", "2"))));

    indexWriter.close();
    indexReader.close();
    directory.close();
}

From source file: org.explore3.searchengine.indexCreator.ImageIndex.java

License: Apache License

static void indexDocument(IndexWriter writer, File file) throws IOException {
    // do not try to index files that cannot be read
    if (file.canRead()) {
        if (file.isDirectory()) {
            String[] files = file.list();
            // an IO error could occur
            if (files != null) {
                for (int i = 0; i < files.length; i++) {
                    indexDocument(writer, new File(file, files[i]));
                }
            }
        }

        else {

            Map<String, String> infoWithField = HtmlImageParsing.parse(file);

            // nothing was parsed from this file, so there is nothing to index
            if (infoWithField.isEmpty()) {
                return;
            }

            // make a new, empty document
            Document doc = new Document();

            Field image = new TextField("image", infoWithField.get("image"), Field.Store.YES);
            doc.add(image);
            Field title = new StringField("title", infoWithField.get("title"), Field.Store.YES);
            doc.add(title);

            Field path = new StringField("path", file.getPath(), Field.Store.YES);
            doc.add(path);

            if (writer.getConfig().getOpenMode() == OpenMode.CREATE) {
                // a freshly created index cannot contain this file yet, so a plain add is enough
                //System.out.println("adding " + file);
                writer.addDocument(doc);
            }

            else {
                // the index may already contain this file; replace any existing
                // document that has the same path
                System.out.println("updating " + file);
                writer.updateDocument(new Term("path", file.getPath()), doc);
            }
        }
    }
}
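
The OpenMode consulted by writer.getConfig().getOpenMode() above is fixed at writer-creation time. A minimal sketch of such a setup (the analyzer and the helper method are illustrative, and FSDirectory.open(Path) assumes Lucene 5.x or later):

static IndexWriter openWriter(java.nio.file.Path indexPath) throws IOException {
    IndexWriterConfig iwc = new IndexWriterConfig(new StandardAnalyzer());
    // the mode chosen here is what getConfig().getOpenMode() reports later
    iwc.setOpenMode(IndexWriterConfig.OpenMode.CREATE_OR_APPEND);
    return new IndexWriter(FSDirectory.open(indexPath), iwc);
}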

From source file: org.explore3.searchengine.indexCreator.Indexer.java

License: Apache License

static void indexDocument(IndexWriter writer, File file) throws IOException {
    // do not try to index files that cannot be read
    if (file.canRead()) {
        if (file.isDirectory()) {
            String[] files = file.list();
            // an IO error could occur
            if (files != null) {
                for (int i = 0; i < files.length; i++) {
                    indexDocument(writer, new File(file, files[i]));
                }
            }
        }

        else {

            Map<String, String> infoWithField = HtmlParsing.parse(file);

            // make a new, empty document
            Document doc = new Document();

            Field title = new TextField("title", infoWithField.get("title"), Field.Store.YES);
            doc.add(title);
            Field path = new StringField("path", file.getPath(), Field.Store.YES);
            doc.add(path);

            // store term vectors with offsets so a highlighter can work
            // directly from the index
            FieldType type = new FieldType();
            type.setIndexed(true);
            type.setStored(true);
            type.setTokenized(true);
            type.setStoreTermVectors(true);
            type.setStoreTermVectorOffsets(true);
            Field highlighter = new Field("highlighterWords", infoWithField.get("text"), type);
            doc.add(highlighter);
            Field contents = new TextField("words", infoWithField.get("text"), Field.Store.YES);
            doc.add(contents);

            if (writer.getConfig().getOpenMode() == OpenMode.CREATE) {
                // a freshly created index cannot contain this file yet, so a plain add is enough
                System.out.println("adding " + file);
                writer.addDocument(doc);
            }

            else {
                // the index may already contain this file; replace any existing
                // document that has the same path
                System.out.println("updating " + file);
                writer.updateDocument(new Term("path", file.getPath()), doc);
            }
        }
    }
}

From source file: org.fnlp.app.lucene.demo.BuildIndex.java

License: Open Source License

/**
 * @param args
 * @throws IOException 
 * @throws LoadModelException 
 */
public static void main(String[] args) throws IOException, LoadModelException {
    String indexPath = "../tmp/lucene";
    System.out.println("Indexing to directory '" + indexPath + "'...");
    Date start = new Date();
    Directory dir = FSDirectory.open(new File(indexPath)); // Directory dir --> FSDirectory (the index lives on disk)
    // initialize the CNFactory (the original Chinese comment here was garbled)
    CNFactory factory = CNFactory.getInstance("../models", Models.SEG_TAG);
    Analyzer analyzer = new FNLPAnalyzer(Version.LUCENE_47);
    IndexWriterConfig iwc = new IndexWriterConfig(Version.LUCENE_47, analyzer);
    iwc.setOpenMode(OpenMode.CREATE_OR_APPEND);
    IndexWriter writer = new IndexWriter(dir, iwc);

    // the demo's Chinese sample sentences were garbled in extraction; only
    // placeholder characters remain
    String[] strs = new String[] { "?",
            "?????????",
            "????", "" };
    for (int i = 0; i < strs.length; i++) {

        Document doc = new Document();

        Field field = new TextField("content", strs[i], Field.Store.YES);
        doc.add(field);
        if (writer.getConfig().getOpenMode() == OpenMode.CREATE) {
            writer.addDocument(doc);
        } else {
            writer.updateDocument(new Term("content", strs[i]), doc);
        }
    }
    writer.close();

    // the directory could be closed here as well (the surrounding Chinese
    // comments were garbled in extraction)
    //dir.close();

    Date end = new Date();
    System.out.println(end.getTime() - start.getTime() + " total milliseconds");

}

From source file: org.hibernate.search.test.configuration.IndexWriterTuningAppliedTest.java

License: LGPL

@Test
public void testInfoStream() throws IOException {
    //Enable trace level on the magic category:
    Logger.getLogger(LogCategory.INFOSTREAM_LOGGER_CATEGORY.getName()).setLevel(Level.TRACE);
    AbstractWorkspaceImpl dvdsWorkspace = sfHolder.extractWorkspace(Dvd.class);
    AbstractWorkspaceImpl booksWorkspace = sfHolder.extractWorkspace(Book.class);
    IndexWriter dvdsIndexWriter = dvdsWorkspace.getIndexWriter();
    IndexWriter booksIndexWriter = booksWorkspace.getIndexWriter();
    try {
        Assert.assertFalse(dvdsIndexWriter.getConfig().getInfoStream().isEnabled("IW"));
        Assert.assertTrue(booksIndexWriter.getConfig().getInfoStream().isEnabled("IW"));
    } finally {
        booksIndexWriter.close();
        dvdsIndexWriter.close();
    }
}
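
The InfoStream returned by getConfig().getInfoStream() is whatever was installed when the writer was built. A minimal sketch of enabling one (routing diagnostics to stdout via PrintStreamInfoStream is an illustrative choice; dir is assumed to be an existing Directory):

    // with a PrintStreamInfoStream installed, getConfig().getInfoStream().isEnabled("IW")
    // returns true, as the "books" assertion above expects
    IndexWriterConfig config = new IndexWriterConfig(new StandardAnalyzer());
    config.setInfoStream(new PrintStreamInfoStream(System.out));
    IndexWriter writer = new IndexWriter(dir, config);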

From source file: org.neo4j.kernel.api.impl.index.backup.LuceneIndexSnapshotFileIterator.java

License: Open Source License

public static ResourceIterator<File> forIndex(File indexFolder, IndexWriter indexWriter) throws IOException {
    IndexDeletionPolicy deletionPolicy = indexWriter.getConfig().getIndexDeletionPolicy();
    if (deletionPolicy instanceof SnapshotDeletionPolicy) {
        SnapshotDeletionPolicy policy = (SnapshotDeletionPolicy) deletionPolicy;
        return hasCommits(indexWriter) ? new LuceneIndexSnapshotFileIterator(indexFolder, policy)
                : emptyIterator();
    } else {
        throw new UnsupportedIndexDeletionPolicy("Can't perform index snapshot with specified index deletion "
                + "policy: " + deletionPolicy.getClass().getName() + ". " + "Only "
                + SnapshotDeletionPolicy.class.getName() + " is " + "supported");
    }
}
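
This check only passes when the writer was opened with a SnapshotDeletionPolicy. A minimal sketch of such a setup (the analyzer is an illustrative choice; indexFolder is the File from the method above):

    // wrap the default policy in a SnapshotDeletionPolicy so that
    // getConfig().getIndexDeletionPolicy() is the type the snapshot code expects
    IndexWriterConfig config = new IndexWriterConfig(new StandardAnalyzer());
    config.setIndexDeletionPolicy(new SnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy()));
    IndexWriter indexWriter = new IndexWriter(FSDirectory.open(indexFolder.toPath()), config);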

From source file: org.neo4j.kernel.api.impl.index.backup.LuceneIndexSnapshots.java

License: Open Source License

/**
 * Create index snapshot iterator for a writable index.
 * @param indexFolder index location folder
 * @param indexWriter index writer
 * @return index file name iterator
 * @throws IOException
 */
public static ResourceIterator<File> forIndex(File indexFolder, IndexWriter indexWriter) throws IOException {
    IndexDeletionPolicy deletionPolicy = indexWriter.getConfig().getIndexDeletionPolicy();
    if (deletionPolicy instanceof SnapshotDeletionPolicy) {
        SnapshotDeletionPolicy policy = (SnapshotDeletionPolicy) deletionPolicy;
        return hasCommits(indexWriter) ? new WritableIndexSnapshotFileIterator(indexFolder, policy)
                : emptyIterator();
    } else {
        throw new UnsupportedIndexDeletionPolicy("Can't perform index snapshot with specified index deletion "
                + "policy: " + deletionPolicy.getClass().getName() + ". " + "Only "
                + SnapshotDeletionPolicy.class.getName() + " is " + "supported");
    }
}

From source file: org.neo4j.kernel.api.impl.index.LuceneSnapshotter.java

License: Open Source License

ResourceIterator<File> snapshot(File indexDir, IndexWriter writer) throws IOException {
    SnapshotDeletionPolicy deletionPolicy = (SnapshotDeletionPolicy) writer.getConfig()
            .getIndexDeletionPolicy();

    try {
        return new LuceneSnapshotIterator(indexDir, deletionPolicy.snapshot(ID), deletionPolicy);
    } catch (IllegalStateException e) {
        if (e.getMessage().equals(NO_INDEX_COMMIT_TO_SNAPSHOT)) {
            return emptyIterator();
        }
        throw e;
    }
}

From source file: org.openeclass.lucene.demo.IndexCourses.java

License: Open Source License

private static void indexCourses(IndexWriter writer, Connection con) throws SQLException, IOException {

    PreparedStatement sql = con
            .prepareStatement("SELECT id, title, keywords, code, public_code, prof_names, created FROM course");
    ResultSet rs = sql.executeQuery();
    int c = 0;

    while (rs.next()) {

        Long id = rs.getLong(1);
        String title = rs.getString(2);
        String keys = rs.getString(3);
        String code = rs.getString(4);
        String publicCode = rs.getString(5);
        String profNames = rs.getString(6);
        //Timestamp created = rs.getTimestamp(7);

        Document doc = new Document();

        Field idField = new Field("course_id", id.toString(), Field.Store.YES, Field.Index.NOT_ANALYZED);
        doc.add(idField);

        Field titleField = new Field("title", title, Field.Store.YES, Field.Index.ANALYZED);
        doc.add(titleField);

        Field keysField = new Field("keywords", keys, Field.Store.YES, Field.Index.ANALYZED);
        doc.add(keysField);

        Field codeField = new Field("code", code, Field.Store.YES, Field.Index.ANALYZED);
        doc.add(codeField);

        Field publicCodeField = new Field("public_code", publicCode, Field.Store.YES, Field.Index.ANALYZED);
        doc.add(publicCodeField);

        Field profsField = new Field("prof_names", profNames, Field.Store.YES, Field.Index.ANALYZED);
        doc.add(profsField);

        if (writer.getConfig().getOpenMode() == OpenMode.CREATE) {
            writer.addDocument(doc);
        } else {
            writer.updateDocument(new Term("course_id", id.toString()), doc);
        }

        c++;
    }

    System.out.println("total db rows: " + c);
    rs.close();
    sql.close();
}