Example usage for org.apache.lucene.index IndexWriter commit

List of usage examples for org.apache.lucene.index IndexWriter commit

Introduction

In this page you can find the example usage for org.apache.lucene.index IndexWriter commit.

Prototype

@Override
public final long commit() throws IOException 

Source Link

Document

Commits all pending changes (added and deleted documents, segment merges, added indexes, etc.) to the index, and syncs all referenced index files, such that a reader will see the changes and the index updates will survive an OS or machine crash or power loss.

Usage

From source file:com.redsqirl.SimpleFileIndexer.java

License:Open Source License

/**
 * Merges the indexes located at {@code pathA} and {@code pathB} into the
 * index at {@code indexPath}, then optimizes and commits the result.
 *
 * @param indexPath directory of the destination index
 * @param pathA     directory of the first source index
 * @param pathB     directory of the second source index
 * @throws CorruptIndexException     if the destination index is corrupt
 * @throws LockObtainFailedException if the destination index is locked by another writer
 * @throws IOException               on any other I/O failure
 */
public void merge(String indexPath, String pathA, String pathB)
        throws CorruptIndexException, LockObtainFailedException, IOException {
    File dir = new File(indexPath);
    SimpleFSDirectory d = new SimpleFSDirectory(dir);
    IndexWriter writer = new IndexWriter(d, new StandardAnalyzer(Version.LUCENE_CURRENT),
            IndexWriter.MaxFieldLength.LIMITED);

    Directory[] indexes = new Directory[2];
    indexes[0] = FSDirectory.open(new File(pathA));
    indexes[1] = FSDirectory.open(new File(pathB));

    try {
        logger.info(" Merging added indexes ");
        writer.addIndexesNoOptimize(indexes);
        logger.info(" Optimizing index ");
        writer.optimize();
        writer.commit();
    } finally {
        // Always release the source directories and the writer, even when the
        // merge fails, so file handles and the index write-lock are not leaked.
        indexes[0].close();
        indexes[1].close();
        writer.close();
    }
}

From source file:com.revorg.goat.IndexManager.java

License:Open Source License

/**
 * Deletes all of the documents in a collection. Causes the collection to be taken offline, preventing searches.
 *
 * @param indexPath directory that contains the Lucene collection
 * @return "Success" on success, or "Failure" plus the error detail
 */
/**
 * Deletes all documents in the collection by re-creating the index in place.
 *
 * @param indexPath directory that contains the Lucene collection
 * @return "Success" on success, or "Failure" plus the error detail
 */
public static String purgeIndex(String indexPath) {

    try {
        String indexExists = isIndexExistant(indexPath);
        if (indexExists.equalsIgnoreCase("Yes")) {
            // Opening with create=true truncates the existing index, which
            // removes every document in one shot.
            IndexWriter writer = new IndexWriter(indexPath, new StandardAnalyzer(), true,
                    IndexWriter.MaxFieldLength.LIMITED);
            try {
                writer.commit();
            } finally {
                // Close in finally so the index write-lock is released even
                // when the commit fails.
                writer.close();
            }
            ActionResult = "Success";
            return ActionResult;
        } else {
            throw new Exception("Unable to open index");
        }
    } catch (Exception e) {
        ActionResultError = " caught a " + e.getClass() + " with message: " + e.getMessage();
        System.out.println("Failure to purge index: " + indexPath);
    }
    ActionResult = "Failure";
    return ActionResult + ActionResultError;
}

From source file:com.revorg.goat.IndexManager.java

License:Open Source License

/**
 * Counts the total number of documents in the index.
 *
 * @param indexPath directory that contains the Lucene collection
 * @return the document count as a string, or "Failure" plus the error detail
 */
/**
 * Counts the total number of documents in the index.
 *
 * @param indexPath directory that contains the Lucene collection
 * @return the document count as a string, or "Failure" plus the error detail
 */
public static String getIndexCount(String indexPath) {

    try {
        // Open in append mode (create=false) purely to read the doc count.
        IndexWriter writer = new IndexWriter(indexPath, new StandardAnalyzer(), false,
                IndexWriter.MaxFieldLength.LIMITED);
        try {
            // NOTE: maxDoc() includes deleted-but-not-yet-merged documents;
            // numDocs() would exclude them. Kept as maxDoc() to preserve the
            // existing reported count.
            int totalInIndex = writer.maxDoc();
            ActionResult = Integer.toString(totalInIndex);
        } finally {
            // No changes were made, so no commit is needed; just release the
            // write-lock even if maxDoc() throws.
            writer.close();
        }
        return ActionResult;
    } catch (Exception e) {
        ActionResultError = " caught a " + e.getClass() + " with message: " + e.getMessage();
        System.out.println("Failure to count index: " + indexPath);
    }
    ActionResult = "Failure";
    return ActionResult + ActionResultError;
}

From source file:com.revorg.goat.IndexManager.java

License:Open Source License

/**
 * Merges two indexes together.
 *
 * @param primaryIndex   the primary Lucene index
 * @param secondaryIndex the secondary Lucene index to merge into the primary
 * @return "Success" on success, or "Failure" plus the error detail
 */
/**
 * Merges two indexes together: the secondary index is appended to the
 * primary, which is then optimized and committed.
 *
 * @param primaryIndex   the primary Lucene index
 * @param secondaryIndex the secondary Lucene index to merge into the primary
 * @return "Success" on success, or "Failure" plus the error detail
 */
public static String mergeIndexes(String primaryIndex, String secondaryIndex) {
    try {

        IndexWriter writer = new IndexWriter(primaryIndex, new StandardAnalyzer(), false,
                IndexWriter.MaxFieldLength.LIMITED);
        Directory secondary = FSDirectory.getDirectory(secondaryIndex);
        try {
            // Merge index #2 into index #1.
            writer.addIndexesNoOptimize(new Directory[] { secondary });
            // Optimize BEFORE committing so the committed state includes the
            // merged segments (previously commit preceded optimize and only
            // close() persisted the optimized result).
            writer.optimize();
            writer.commit();
        } finally {
            // Release the secondary directory and the write-lock even if the
            // merge fails part-way through.
            secondary.close();
            writer.close();
        }
        ActionResult = "Success";
        return ActionResult;
    } catch (Exception e) {
        ActionResultError = " caught a " + e.getClass() + " with message: " + e.getMessage();
        System.out.println("Failure to merge index: " + primaryIndex);
        System.out.println(ActionResultError);
    }
    ActionResult = "Failure";
    return ActionResult + ActionResultError;
}

From source file:com.rocana.lucene.codec.v1.TestRocanaPerFieldPostingsFormat2.java

License:Apache License

@Test
public void testMergeUnusedPerFieldCodec() throws IOException {
    Directory dir = newDirectory();//w  w  w  .  j a v a  2 s . c  o  m
    IndexWriterConfig iwconf = newIndexWriterConfig(new MockAnalyzer(random())).setOpenMode(OpenMode.CREATE)
            .setCodec(new MockCodec());
    IndexWriter writer = newWriter(dir, iwconf);
    addDocs(writer, 10);
    writer.commit();
    addDocs3(writer, 10);
    writer.commit();
    addDocs2(writer, 10);
    writer.commit();
    assertEquals(30, writer.maxDoc());
    TestUtil.checkIndex(dir);
    writer.forceMerge(1);
    assertEquals(30, writer.maxDoc());
    writer.close();
    dir.close();
}

From source file:com.rocana.lucene.codec.v1.TestRocanaPerFieldPostingsFormat2.java

License:Apache License

@Test
public void testChangeCodecAndMerge() throws IOException {
    Directory dir = newDirectory();/*from  ww w.  j  a  v a2  s  .c  o m*/
    if (VERBOSE) {
        System.out.println("TEST: make new index");
    }
    IndexWriterConfig iwconf = newIndexWriterConfig(new MockAnalyzer(random())).setOpenMode(OpenMode.CREATE)
            .setCodec(new MockCodec());
    iwconf.setMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH);
    //((LogMergePolicy) iwconf.getMergePolicy()).setMergeFactor(10);
    IndexWriter writer = newWriter(dir, iwconf);

    addDocs(writer, 10);
    writer.commit();
    assertQuery(new Term("content", "aaa"), dir, 10);
    if (VERBOSE) {
        System.out.println("TEST: addDocs3");
    }
    addDocs3(writer, 10);
    writer.commit();
    writer.close();

    assertQuery(new Term("content", "ccc"), dir, 10);
    assertQuery(new Term("content", "aaa"), dir, 10);
    Codec codec = iwconf.getCodec();

    iwconf = newIndexWriterConfig(new MockAnalyzer(random())).setOpenMode(OpenMode.APPEND).setCodec(codec);
    //((LogMergePolicy) iwconf.getMergePolicy()).setNoCFSRatio(0.0);
    //((LogMergePolicy) iwconf.getMergePolicy()).setMergeFactor(10);
    iwconf.setMaxBufferedDocs(IndexWriterConfig.DISABLE_AUTO_FLUSH);

    iwconf.setCodec(new MockCodec2()); // uses standard for field content
    writer = newWriter(dir, iwconf);
    // swap in new codec for currently written segments
    if (VERBOSE) {
        System.out.println("TEST: add docs w/ Standard codec for content field");
    }
    addDocs2(writer, 10);
    writer.commit();
    codec = iwconf.getCodec();
    assertEquals(30, writer.maxDoc());
    assertQuery(new Term("content", "bbb"), dir, 10);
    assertQuery(new Term("content", "ccc"), dir, 10); ////
    assertQuery(new Term("content", "aaa"), dir, 10);

    if (VERBOSE) {
        System.out.println("TEST: add more docs w/ new codec");
    }
    addDocs2(writer, 10);
    writer.commit();
    assertQuery(new Term("content", "ccc"), dir, 10);
    assertQuery(new Term("content", "bbb"), dir, 20);
    assertQuery(new Term("content", "aaa"), dir, 10);
    assertEquals(40, writer.maxDoc());

    if (VERBOSE) {
        System.out.println("TEST: now optimize");
    }
    writer.forceMerge(1);
    assertEquals(40, writer.maxDoc());
    writer.close();
    assertQuery(new Term("content", "ccc"), dir, 10);
    assertQuery(new Term("content", "bbb"), dir, 20);
    assertQuery(new Term("content", "aaa"), dir, 10);

    dir.close();
}

From source file:com.rocana.lucene.codec.v1.TestRocanaPerFieldPostingsFormat2.java

License:Apache License

@Test
public void testStressPerFieldCodec() throws IOException {
    Directory dir = newDirectory(random());
    final int docsPerRound = 97;
    int numRounds = atLeast(1);
    for (int i = 0; i < numRounds; i++) {
        int num = TestUtil.nextInt(random(), 30, 60);
        IndexWriterConfig config = newIndexWriterConfig(random(), new MockAnalyzer(random()));
        config.setOpenMode(OpenMode.CREATE_OR_APPEND);
        IndexWriter writer = newWriter(dir, config);
        for (int j = 0; j < docsPerRound; j++) {
            final Document doc = new Document();
            for (int k = 0; k < num; k++) {
                FieldType customType = new FieldType(TextField.TYPE_NOT_STORED);
                customType.setTokenized(random().nextBoolean());
                customType.setOmitNorms(random().nextBoolean());
                Field field = newField("" + k, TestUtil.randomRealisticUnicodeString(random(), 128),
                        customType);/*from   w  w w .j  a  v  a 2  s  . c o m*/
                doc.add(field);
            }
            writer.addDocument(doc);
        }
        if (random().nextBoolean()) {
            writer.forceMerge(1);
        }
        writer.commit();
        assertEquals((i + 1) * docsPerRound, writer.maxDoc());
        writer.close();
    }
    dir.close();
}

From source file:com.searchlocal.lucene.IndexBeanList.java

License:Open Source License

/**
 * Builds index entries for every bean in {@code beanList} according to its
 * document type, then commits and closes the writer.
 *
 * @param namespace index namespace used to obtain the {@code IndexWriter}
 * @param doctype   document type key (EXCEL, WORD, PDF, PPT, CHM, HTML or TXT)
 * @param beanList  beans to index; each element is cast to the bean class matching {@code doctype}
 */
/**
 * Builds index entries for every bean in {@code beanList} according to its
 * document type, then commits and closes the writer.
 *
 * @param namespace index namespace used to obtain the {@code IndexWriter}
 * @param doctype   document type key (EXCEL, WORD, PDF, PPT, CHM, HTML or TXT)
 * @param beanList  beans to index; each element is cast to the bean class matching {@code doctype}
 */
public static void makeindex(String namespace, String doctype, List beanList) {

    IndexWriterFactory factory = new IndexWriterFactory();

    IndexWriter indexWriter = factory.getWriter(namespace);
    try {
        if (Constant.FileNameClassify.EXCEL.equals(doctype)) {
            for (Iterator itera = beanList.iterator(); itera.hasNext();) {
                ExcelFileBean bean = (ExcelFileBean) itera.next();
                makeExcelindex(indexWriter, bean);
            }
            factory.optimize(indexWriter);
        }
        if (Constant.FileNameClassify.WORD.equals(doctype)) {
            for (Iterator itera = beanList.iterator(); itera.hasNext();) {
                WordFileBean bean = (WordFileBean) itera.next();
                makeWordindex(indexWriter, bean);
            }
            factory.optimize(indexWriter);
        }
        if (Constant.FileNameClassify.PDF.equals(doctype)) {
            for (Iterator itera = beanList.iterator(); itera.hasNext();) {
                PdfFileBean bean = (PdfFileBean) itera.next();
                makePdfindex(indexWriter, bean);
            }
            factory.optimize(indexWriter);
        }
        if (Constant.FileNameClassify.PPT.equals(doctype)) {
            for (Iterator itera = beanList.iterator(); itera.hasNext();) {
                PptFileBean bean = (PptFileBean) itera.next();
                makePptindex(indexWriter, bean);
            }
            factory.optimize(indexWriter);
        }
        if (Constant.FileNameClassify.CHM.equals(doctype)) {
            for (Iterator itera = beanList.iterator(); itera.hasNext();) {
                ChmFileBean bean = (ChmFileBean) itera.next();
                makeChmindex(indexWriter, bean);
            }
            // Consistent with the other branches (was a static call before).
            factory.optimize(indexWriter);
        }
        if (Constant.FileNameClassify.HTML.equals(doctype)) {
            for (Iterator itera = beanList.iterator(); itera.hasNext();) {
                HtmlFileBean bean = (HtmlFileBean) itera.next();
                makeHtmlindex(indexWriter, bean);
            }
            factory.optimize(indexWriter);
        }
        if (Constant.FileNameClassify.TXT.equals(doctype)) {
            for (Iterator itera = beanList.iterator(); itera.hasNext();) {
                TxtFileBean bean = (TxtFileBean) itera.next();
                makeTxtindex(indexWriter, bean);
            }
            factory.optimize(indexWriter);
        }

        if (indexWriter != null) {
            indexWriter.commit();
        }

    } catch (InterruptedException e) {
        e.printStackTrace();
    } catch (IOException e) {
        e.printStackTrace();
    } finally {
        // Always close the writer and drop it from the factory, even when
        // indexing fails, so the index write-lock is not left held.
        if (indexWriter != null) {
            try {
                indexWriter.close();
                factory.removeIndexWriter(namespace);
            } catch (IOException e) {
                e.printStackTrace();
            }
        }
    }
}

From source file:com.senseidb.abacus.api.codec.CodecTest.java

License:Apache License

/**
 * Indexes every non-blank line from {@code datasrc} into a fresh on-disk
 * index, one document per line, force-merged to a single committed segment.
 */
static Directory buildIndex(Iterable<String> datasrc, Codec codec) throws Exception {
    String idxname = codec == null ? "lucene" : codec.getName();
    Directory dir = FSDirectory.open(new File("/tmp/codectest", idxname));
    IndexWriterConfig conf = new IndexWriterConfig(Version.LUCENE_44, new StandardAnalyzer(Version.LUCENE_44));
    conf.setUseCompoundFile(false);
    if (codec != null) {
        conf.setCodec(codec);
    }

    IndexWriter writer = new IndexWriter(dir, conf);

    // Field settings: indexed, unstored, docs-only postings, no norms.
    FieldType fieldType = new FieldType();
    fieldType.setIndexed(true);
    fieldType.setStored(false);
    fieldType.setIndexOptions(IndexOptions.DOCS_ONLY);
    fieldType.setOmitNorms(true);

    for (String doc : datasrc) {
        if (doc == null) {
            break; // a null entry terminates the source
        }
        doc = doc.trim();
        if (doc.length() == 0) {
            continue; // skip blank lines
        }
        Document document = new Document();
        document.add(new Field(FIELD, doc, fieldType));
        writer.addDocument(document);
    }
    writer.forceMerge(1);
    writer.commit();
    writer.close();
    return dir;
}

From source file:com.senseidb.search.node.inmemory.InMemorySenseiService.java

License:Apache License

// Replaces the index contents with the given documents and commits.
// NOTE(review): the directory parameter is unused here; kept for callers.
private void addDocuments(Directory directory, IndexWriter writer, List<JSONObject> documents) {
    try {
        // Start from an empty index, then add every non-null document and
        // mirror it into the pluggable search engine.
        writer.deleteAll();
        for (JSONObject document : documents) {
            if (document == null) {
                continue;
            }
            writer.addDocument(buildDoc(document));
            pluggableSearchEngineManager.update(document, "");
        }
        writer.commit();

    } catch (Exception e) {
        throw new RuntimeException(e);
    }
}