Example usage for org.apache.lucene.index IndexWriter commit

List of usage examples for org.apache.lucene.index IndexWriter commit

Introduction

On this page you can find example usages of org.apache.lucene.index.IndexWriter#commit.

Prototype

@Override
public final long commit() throws IOException 

Source Link

Document

Commits all pending changes (added and deleted documents, segment merges, added indexes, etc.) to the index, and syncs all referenced index files, such that a reader will see the changes and the index updates will survive an OS or machine crash or power loss.

Usage

From source file:com.sxc.lucene.index.IndexingTest.java

License:Apache License

/**
 * Verifies that deleting a document WITHOUT merging only marks it as deleted:
 * after commit, {@code maxDoc} still counts the deleted document while
 * {@code numDocs} excludes it.
 *
 * @throws IOException if the index cannot be read or written
 */
public void testDeleteBeforeOptimize() throws IOException {
    IndexWriter writer = getWriter();
    try {
        assertEquals(2, writer.numDocs()); // both documents initially live
        writer.deleteDocuments(new Term("id", "1")); // mark doc "1" as deleted
        writer.commit();
        assertTrue(writer.hasDeletions()); // the deletion is recorded
        assertEquals(2, writer.maxDoc()); // deleted doc still occupies a slot
        assertEquals(1, writer.numDocs()); // but is excluded from the live count
    } finally {
        // Close even when an assertion fails, so the index write lock is released.
        writer.close();
    }
}

From source file:com.sxc.lucene.index.IndexingTest.java

License:Apache License

/**
 * Verifies that forcing a merge of deleted documents physically removes them:
 * after {@code forceMergeDeletes} and commit, both {@code maxDoc} and
 * {@code numDocs} drop to the live-document count and no deletions remain.
 *
 * @throws IOException if the index cannot be read or written
 */
public void testDeleteAfterOptimize() throws IOException {
    IndexWriter writer = getWriter();
    try {
        assertEquals(2, writer.numDocs()); // both documents initially live
        writer.deleteDocuments(new Term("id", "1"));
        writer.forceMergeDeletes(); // physically purge deleted docs via merge
        writer.commit();
        assertFalse(writer.hasDeletions()); // nothing left marked as deleted
        assertEquals(1, writer.maxDoc()); // the slot was reclaimed by the merge
        assertEquals(1, writer.numDocs());
    } finally {
        // Close even when an assertion fails, so the index write lock is released.
        writer.close();
    }
}

From source file:com.vmware.dcp.services.common.LuceneBlobIndexService.java

License:Open Source License

/**
 * Creates an {@link IndexWriter} over a memory-mapped directory, honoring the
 * service's CREATE/CREATE_OR_APPEND option and its exact memory limit (if any)
 * as the RAM buffer size. An initial commit is performed so that readers can
 * open the (possibly empty) index immediately.
 *
 * @param directory filesystem location of the index
 * @return an open, committed writer; the caller owns and must close it
 * @throws IOException if the directory or writer cannot be opened or committed
 */
public IndexWriter createWriter(File directory) throws IOException {
    Directory dir = MMapDirectory.open(directory.toPath());
    Analyzer analyzer = new SimpleAnalyzer();
    IndexWriterConfig iwc = new IndexWriterConfig(analyzer);
    if (this.indexOptions.contains(BlobIndexOption.CREATE)) {
        iwc.setOpenMode(OpenMode.CREATE);
    } else {
        iwc.setOpenMode(OpenMode.CREATE_OR_APPEND);
    }
    Long totalMBs = getHost().getServiceMemoryLimitMB(getSelfLink(), MemoryLimitType.EXACT);
    if (totalMBs != null) {
        // Guard against a zero/negative limit; Lucene requires a positive buffer size.
        iwc.setRAMBufferSizeMB(Math.max(1, totalMBs));
    }
    IndexWriter w = null;
    try {
        w = new IndexWriter(dir, iwc);
        w.commit();
        return w;
    } catch (IOException | RuntimeException e) {
        // Don't leak the directory or the index write lock when setup fails part-way.
        try {
            if (w != null) {
                w.close();
            } else {
                dir.close();
            }
        } catch (IOException suppressed) {
            e.addSuppressed(suppressed);
        }
        throw e;
    }
}

From source file:com.vmware.dcp.services.common.LuceneBlobIndexService.java

License:Open Source License

/**
 * Best-effort shutdown of the given writer: commits pending changes and
 * closes it. Any failure is deliberately ignored — this runs on teardown
 * paths where there is nothing useful left to do with an error.
 */
private void close(IndexWriter wr) {
    if (wr == null) {
        return;
    }
    try {
        wr.commit();
        wr.close();
    } catch (Throwable ignored) {
        // Intentional swallow: teardown must not propagate failures.
    }
}

From source file:com.vmware.dcp.services.common.LuceneBlobIndexService.java

License:Open Source License

/**
 * Periodic maintenance: commits the index, publishes the indexed-document
 * count stat, and — if the on-disk index file count exceeds the reopen
 * threshold — consolidates index files. Completes or fails {@code post}.
 */
private void handleMaintenanceSafe(Operation post) {
    try {
        IndexWriter writer = this.writer;
        if (writer == null) {
            post.complete();
            return;
        }
        writer.commit();
        setStat(LuceneDocumentIndexService.STAT_NAME_INDEXED_DOCUMENT_COUNT, writer.maxDoc());
        File sandbox = new File(getHost().getStorageSandbox());
        String[] indexFiles = new File(sandbox, this.indexDirectory).list();
        int fileCount = (indexFiles == null) ? 0 : indexFiles.length;
        // for debugging use only: we need to verify that the number of index files stays bounded
        if (fileCount > LuceneDocumentIndexService.INDEX_FILE_COUNT_THRESHOLD_FOR_REOPEN) {
            consolidateIndexFiles();
        }
        post.complete();
    } catch (Throwable e) {
        logSevere(e);
        post.fail(e);
    }
}

From source file:com.vmware.dcp.services.common.LuceneDocumentIndexService.java

License:Open Source License

/**
 * Best-effort shutdown of the given writer: logs the document count, commits
 * pending changes, and closes it. Failures are deliberately ignored since
 * this runs on teardown paths.
 */
private void close(IndexWriter wr) {
    if (wr == null) {
        return;
    }
    try {
        logInfo("Document count: %d ", wr.maxDoc());
        wr.commit();
        wr.close();
    } catch (Throwable ignored) {
        // Intentional swallow: teardown must not propagate failures.
    }
}

From source file:com.vmware.dcp.services.common.LuceneDocumentIndexService.java

License:Open Source License

/**
 * Periodic index maintenance: publishes document-count stats, applies the
 * expiration and version-retention policies, commits, and — when forced or
 * when the on-disk file count crosses the reopen threshold — reopens the
 * writer. On any failure the writer is reopened as a recovery attempt and
 * the error is rethrown.
 *
 * Fix: the commit-duration stat was previously computed from timestamps
 * taken BEFORE {@code w.commit()} executed, so it never measured the commit.
 * The measurement window now brackets the actual commit call.
 *
 * @param forceMerge when true, always reopen the writer after committing
 * @throws Throwable any maintenance failure, after attempting recovery
 */
private void handleMaintenanceImpl(boolean forceMerge) throws Throwable {

    int count = 0;
    try {
        IndexWriter w = this.writer;
        if (w == null) {
            return;
        }

        setStat(STAT_NAME_INDEXED_DOCUMENT_COUNT, w.maxDoc());

        applyDocumentExpirationPolicy(w);
        applyDocumentVersionRetentionPolicy(w);

        // Time the commit itself so the duration stat reflects commit cost.
        long start = Utils.getNowMicrosUtc();
        w.commit();
        long end = Utils.getNowMicrosUtc();

        adjustStat(STAT_NAME_COMMIT_COUNT, 1.0);
        setStat(STAT_NAME_COMMIT_DURATION_MICROS, end - start);

        File directory = new File(new File(getHost().getStorageSandbox()), this.indexDirectory);
        String[] list = directory.list();
        count = list == null ? 0 : list.length;

        if (!forceMerge && count < INDEX_FILE_COUNT_THRESHOLD_FOR_REOPEN) {
            return;
        }
        reOpenWriterSynchronously();
    } catch (Throwable e) {
        logWarning("Attempting recovery due to error: %s", e.getMessage());
        reOpenWriterSynchronously();
        throw e;
    }
}

From source file:com.vmware.xenon.services.common.LuceneBlobIndexService.java

License:Open Source License

/**
 * Best-effort shutdown of the given writer: commits, closes, and releases
 * the cached serialization buffer. Failures are deliberately ignored since
 * this runs on teardown paths.
 */
private void close(IndexWriter wr) {
    if (wr == null) {
        return;
    }
    try {
        wr.commit();
        wr.close();
        // Only drop the buffer after a clean close, matching the original ordering.
        this.buffer = null;
    } catch (Throwable ignored) {
        // Intentional swallow: teardown must not propagate failures.
    }
}

From source file:com.vmware.xenon.services.common.LuceneBlobIndexService.java

License:Open Source License

/**
 * Periodic maintenance: commits the index, publishes the indexed-document
 * count stat, prunes unused index files when the on-disk file count crosses
 * the writer-refresh threshold, and frees the cached serialization buffer.
 * Completes or fails {@code post}.
 */
private void handleMaintenanceSafe(Operation post) {
    try {
        IndexWriter writer = this.writer;
        if (writer == null) {
            post.complete();
            return;
        }
        writer.commit();

        setStat(LuceneDocumentIndexService.STAT_NAME_INDEXED_DOCUMENT_COUNT, writer.maxDoc());
        File sandbox = new File(getHost().getStorageSandbox());
        String[] indexFiles = new File(sandbox, this.indexDirectory).list();
        int fileCount = (indexFiles == null) ? 0 : indexFiles.length;
        if (fileCount > LuceneDocumentIndexService.getIndexFileCountThresholdForWriterRefresh()) {
            logInfo("Index file count: %d, document count: %d", fileCount, writer.maxDoc());
            closeSearcherSafe();
            writer.deleteUnusedFiles();
        }

        // Periodically free the buffer. If we are busy serializing requests, they will be ahead of
        // maintenance in the single threaded executor queue, so they will get to re-use the existing
        // allocation
        this.buffer = null;
        post.complete();
    } catch (Throwable e) {
        logSevere(e);
        post.fail(e);
    }
}

From source file:com.vmware.xenon.services.common.LuceneDocumentIndexBackupService.java

License:Open Source License

/**
 * Takes a backup of the document index at {@code destinationPath}, either as
 * a single zip archive or as an incremental mirror of the index directory.
 *
 * A Lucene {@link SnapshotDeletionPolicy} snapshot pins the committed files
 * for the duration of the copy so concurrent merges cannot delete them; the
 * snapshot is released (and unused files pruned) in the finally block.
 *
 * @param destinationPath zip file path (zip mode) or backup directory (incremental mode)
 * @param isZipBackup     true for a zip archive, false for an incremental directory sync
 * @param indexInfo       carries the writer and the index directory name
 *                        (null indexDirectory means an in-memory index)
 * @throws IOException on any filesystem or index failure
 */
private void takeSnapshot(Path destinationPath, boolean isZipBackup, InternalDocumentIndexInfo indexInfo)
        throws IOException {

    IndexWriter writer = indexInfo.indexWriter;
    boolean isInMemoryIndex = indexInfo.indexDirectory == null;

    URI storageSandbox = getHost().getStorageSandbox();

    SnapshotDeletionPolicy snapshotter = null;
    IndexCommit commit = null;
    long backupStartTime = System.currentTimeMillis();
    try {
        // Create a snapshot so the index files won't be deleted.
        writer.commit();
        // NOTE(review): assumes the writer was configured with a SnapshotDeletionPolicy;
        // the cast fails with ClassCastException otherwise.
        snapshotter = (SnapshotDeletionPolicy) writer.getConfig().getIndexDeletionPolicy();
        commit = snapshotter.snapshot();

        if (isZipBackup) {
            Path tempDir = null;
            try {
                List<URI> fileList = new ArrayList<>();
                if (isInMemoryIndex) {
                    // In-memory index has no on-disk files: materialize the commit
                    // into a temp directory first, then zip from there.
                    tempDir = Files.createTempDirectory("lucene-in-memory-backup");
                    copyInMemoryLuceneIndexToDirectory(commit, tempDir);
                    List<URI> files = Files.list(tempDir).map(Path::toUri).collect(toList());
                    fileList.addAll(files);
                } else {

                    // Zip only the files belonging to this commit, resolved
                    // against the on-disk index directory.
                    Path indexDirectoryPath = Paths.get(storageSandbox).resolve(indexInfo.indexDirectory);
                    List<URI> files = commit.getFileNames().stream().map(indexDirectoryPath::resolve)
                            .map(Path::toUri).collect(toList());
                    fileList.addAll(files);
                }

                // Add files in the commit to a zip file.
                FileUtils.zipFiles(fileList, destinationPath.toFile());
            } finally {
                if (tempDir != null) {
                    FileUtils.deleteFiles(tempDir.toFile());
                }
            }
        } else {
            // incremental backup

            // create destination dir if not exist
            if (!Files.exists(destinationPath)) {
                Files.createDirectory(destinationPath);
            }

            Set<String> sourceFileNames = new HashSet<>(commit.getFileNames());

            Set<String> destFileNames = Files.list(destinationPath).filter(Files::isRegularFile)
                    .map(path -> path.getFileName().toString()).collect(toSet());

            Path tempDir = null;
            try {
                Path indexDirectoryPath;
                if (isInMemoryIndex) {
                    // copy files into temp directory and point index directory path to temp dir
                    tempDir = Files.createTempDirectory("lucene-in-memory-backup");
                    copyInMemoryLuceneIndexToDirectory(commit, tempDir);
                    indexDirectoryPath = tempDir;
                } else {
                    indexDirectoryPath = Paths.get(storageSandbox).resolve(indexInfo.indexDirectory);
                }

                // add files exist in source but not in dest
                Set<String> toAdd = new HashSet<>(sourceFileNames);
                toAdd.removeAll(destFileNames);
                for (String filename : toAdd) {
                    Path source = indexDirectoryPath.resolve(filename);
                    Path target = destinationPath.resolve(filename);
                    Files.copy(source, target);
                }

                // delete files exist in dest but not in source
                // (Lucene segment files are immutable, so name-based diffing is
                // sufficient: a file with the same name has the same content.)
                Set<String> toDelete = new HashSet<>(destFileNames);
                toDelete.removeAll(sourceFileNames);
                for (String filename : toDelete) {
                    Path path = destinationPath.resolve(filename);
                    Files.delete(path);
                }

                long backupEndTime = System.currentTimeMillis();
                logInfo("Incremental backup performed. dir=%s, added=%d, deleted=%d, took=%dms",
                        destinationPath, toAdd.size(), toDelete.size(), backupEndTime - backupStartTime);
            } finally {
                if (tempDir != null) {
                    FileUtils.deleteFiles(tempDir.toFile());
                }
            }
        }
    } finally {
        // Release the snapshot so Lucene may delete the pinned files again,
        // then prune anything no longer referenced.
        if (snapshotter != null && commit != null) {
            snapshotter.release(commit);
        }
        writer.deleteUnusedFiles();
    }
}