Example usage for org.apache.lucene.index SnapshotDeletionPolicy SnapshotDeletionPolicy

List of usage examples for org.apache.lucene.index SnapshotDeletionPolicy SnapshotDeletionPolicy

Introduction

On this page you can find example usages of the org.apache.lucene.index SnapshotDeletionPolicy(IndexDeletionPolicy) constructor.

Prototype

public SnapshotDeletionPolicy(IndexDeletionPolicy primary) 

Source Link

Document

Sole constructor, taking the incoming IndexDeletionPolicy to wrap.

Usage

From source file:com.b2international.index.compat.SingleDirectoryIndexImpl.java

License:Apache License

/**
 * Opens (or creates) the Lucene index under the given directory and wires up
 * the writer and the searcher manager.
 *
 * @param indexDirectory filesystem location of the index
 * @param clean          when {@code true} any existing index is discarded
 *                       ({@code OpenMode.CREATE}); otherwise it is opened for append
 * @throws RuntimeException wrapping any {@link IOException} raised while opening
 */
protected void initLucene(final File indexDirectory, final boolean clean) {
    try {
        this.directory = Directories.openFile(indexDirectory.toPath());
        final Analyzer analyzer = new ComponentTermAnalyzer();
        final IndexWriterConfig config = new IndexWriterConfig(analyzer);
        config.setOpenMode(clean ? OpenMode.CREATE : OpenMode.CREATE_OR_APPEND);
        // Wrap the default deletion policy so index commits can be snapshotted later.
        config.setIndexDeletionPolicy(new SnapshotDeletionPolicy(config.getIndexDeletionPolicy()));
        this.writer = new IndexWriter(directory, config);
        this.writer.commit(); // Create the index if it didn't exist yet
        this.manager = new SearcherManager(directory, new SearchWarmerFactory());
    } catch (final IOException e) {
        // Best-effort cleanup of anything partially opened so file handles
        // don't leak when initialization fails halfway through.
        try {
            if (this.writer != null) {
                this.writer.close();
            }
        } catch (final IOException ignored) {
            // the original failure rethrown below is the informative one
        }
        try {
            if (this.directory != null) {
                this.directory.close();
            }
        } catch (final IOException ignored) {
            // see above
        }
        throw new RuntimeException(e.getMessage(), e);
    }
}

From source file:com.leavesfly.lia.admin.Fragments.java

License:Apache License

/**
 * Fragment (from "Lucene in Action") showing how to snapshot the current index
 * commit so its files can be copied for a hot backup while the writer stays open.
 *
 * NOTE(review): this uses the pre-Lucene-4 SnapshotDeletionPolicy API — the
 * no-argument snapshot()/release() pair and the legacy IndexWriter constructor.
 * Later Lucene versions require release(commit) instead (compare the variant
 * later in this page). Illustrative only: dir and analyzer are null, so this
 * would fail if actually executed — TODO confirm intended Lucene version.
 */
public void test() throws Exception {
    Directory dir = null;
    Analyzer analyzer = null;
    // START
    IndexDeletionPolicy policy = new KeepOnlyLastCommitDeletionPolicy();
    SnapshotDeletionPolicy snapshotter = new SnapshotDeletionPolicy(policy);
    IndexWriter writer = new IndexWriter(dir, analyzer, snapshotter, IndexWriter.MaxFieldLength.UNLIMITED);
    // END

    try {
        // The snapshot pins the commit so its files are not deleted while copying.
        IndexCommit commit = (IndexCommit) snapshotter.snapshot();
        Collection<String> fileNames = commit.getFileNames();
        /*<iterate over & copy files from fileNames>*/
    } finally {
        // Release the snapshot so the deletion policy may reclaim the commit's files.
        snapshotter.release();
    }
}

From source file:com.mathworks.xzheng.admin.Fragments.java

License:Apache License

/**
 * Demonstrates the Lucene 4.x snapshot/release cycle: a SnapshotDeletionPolicy
 * wraps the keep-last-commit policy so the latest commit can be pinned and its
 * files copied for a hot backup, then released.
 *
 * Illustrative fragment: {@code dir} and {@code analyzer} are null placeholders.
 *
 * @throws Exception if opening the writer or taking the snapshot fails
 */
public void test() throws Exception {
    Directory dir = null;
    Analyzer analyzer = null;
    // START
    IndexDeletionPolicy policy = new KeepOnlyLastCommitDeletionPolicy();
    SnapshotDeletionPolicy snapshotter = new SnapshotDeletionPolicy(policy);

    IndexWriterConfig config = new IndexWriterConfig(Version.LUCENE_46, analyzer);
    config.setIndexDeletionPolicy(snapshotter);
    IndexWriter writer = new IndexWriter(dir, config);
    // END

    IndexCommit commit = null;
    try {
        // snapshot() already returns IndexCommit in this API; no cast needed.
        commit = snapshotter.snapshot();
        Collection<String> fileNames = commit.getFileNames();
        /*<iterate over & copy files from fileNames>*/
    } finally {
        // Guard against snapshot() having failed (e.g. no commit exists yet),
        // in which case there is nothing to release.
        if (commit != null) {
            snapshotter.release(commit);
        }
        // Close the writer so the index lock and file handles are released.
        writer.close();
    }
}

From source file:com.qwazr.search.bench.LuceneCommonIndex.java

License:Apache License

/**
 * Creates the on-disk layout {@code <root>/<schema>/<index>/data} and opens an
 * IndexWriter configured for SSD-friendly merging and snapshot-based replication.
 *
 * @param rootDirectory   root under which schema/index directories are created
 * @param schemaName      name of the schema directory to create
 * @param indexName       name of the index directory to create
 * @param ramBufferSize   writer RAM buffer size in megabytes
 * @param useCompoundFile whether segments are written as compound files
 * @throws IOException if the directories cannot be created or the writer cannot open
 */
LuceneCommonIndex(final Path rootDirectory, final String schemaName, final String indexName,
        final double ramBufferSize, final boolean useCompoundFile) throws IOException {

    final Path schemaDirectory = Files.createDirectory(rootDirectory.resolve(schemaName));
    this.indexDirectory = Files.createDirectory(schemaDirectory.resolve(indexName));
    this.luceneDirectory = indexDirectory.resolve("data");
    this.dataDirectory = FSDirectory.open(luceneDirectory);

    final IndexWriterConfig writerConfig =
            new IndexWriterConfig(new PerFieldAnalyzerWrapper(new StandardAnalyzer()));
    writerConfig.setOpenMode(IndexWriterConfig.OpenMode.CREATE_OR_APPEND);
    writerConfig.setRAMBufferSizeMB(ramBufferSize);
    writerConfig.setUseCompoundFile(useCompoundFile);
    writerConfig.setMergePolicy(new TieredMergePolicy());

    // Cap concurrent merges at the level configured for SSD storage.
    final ConcurrentMergeScheduler scheduler = new ConcurrentMergeScheduler();
    scheduler.setMaxMergesAndThreads(MAX_SSD_MERGE_THREADS, MAX_SSD_MERGE_THREADS);
    writerConfig.setMergeScheduler(scheduler);

    // Wrap the default deletion policy so commits can be snapshotted for replication.
    writerConfig.setIndexDeletionPolicy(
            new SnapshotDeletionPolicy(writerConfig.getIndexDeletionPolicy()));

    this.indexWriter = new IndexWriter(this.dataDirectory, writerConfig);
    this.localReplicator = new LocalReplicator();
}

From source file:com.qwazr.search.index.IndexInstance.java

License:Apache License

/**
 * Builds a fully initialized {@code IndexInstance} rooted at the given directory:
 * loads (or persists) its settings, field and analyzer definitions, then opens the
 * Lucene data directory, writer and searcher manager.
 *
 * @param schema         the parent schema this index belongs to
 * @param indexDirectory directory that holds (or will hold) the index files
 * @param settings       explicit settings to persist; {@code null} to load the
 *                       settings previously stored on disk (falling back to EMPTY)
 * @return the new index instance
 * @throws ServerException on server-side validation failures
 * @throws IOException if the directory or any definition file cannot be read/written
 * @throws ReflectiveOperationException if a configured class cannot be instantiated
 * @throws InterruptedException if initialization is interrupted
 */
final static IndexInstance newInstance(SchemaInstance schema, File indexDirectory,
        IndexSettingsDefinition settings)
        throws ServerException, IOException, ReflectiveOperationException, InterruptedException {
    UpdatableAnalyzer indexAnalyzer = null;
    UpdatableAnalyzer queryAnalyzer = null;
    IndexWriter indexWriter = null;
    Directory dataDirectory = null;
    try {

        if (!indexDirectory.exists())
            indexDirectory.mkdir();
        if (!indexDirectory.isDirectory())
            throw new IOException(
                    "This name is not valid. No directory exists for this location: " + indexDirectory);

        FileSet fileSet = new FileSet(indexDirectory);

        // Loading the settings: either read the persisted ones or persist the given ones
        if (settings == null) {
            settings = fileSet.settingsFile.exists()
                    ? JsonMapper.MAPPER.readValue(fileSet.settingsFile, IndexSettingsDefinition.class)
                    : IndexSettingsDefinition.EMPTY;
        } else {
            JsonMapper.MAPPER.writeValue(fileSet.settingsFile, settings);
        }

        // Loading the field definitions (empty map when the file does not exist)
        File fieldMapFile = new File(indexDirectory, FIELDS_FILE);
        LinkedHashMap<String, FieldDefinition> fieldMap = fieldMapFile.exists()
                ? JsonMapper.MAPPER.readValue(fieldMapFile, FieldDefinition.MapStringFieldTypeRef)
                : new LinkedHashMap<>();

        // Loading the analyzer definitions (empty map when the file does not exist)
        File analyzerMapFile = new File(indexDirectory, ANALYZERS_FILE);
        LinkedHashMap<String, AnalyzerDefinition> analyzerMap = analyzerMapFile.exists()
                ? JsonMapper.MAPPER.readValue(analyzerMapFile, AnalyzerDefinition.MapStringAnalyzerTypeRef)
                : new LinkedHashMap<>();

        AnalyzerContext context = new AnalyzerContext(analyzerMap, fieldMap);
        indexAnalyzer = new UpdatableAnalyzer(context, context.indexAnalyzerMap);
        queryAnalyzer = new UpdatableAnalyzer(context, context.queryAnalyzerMap);

        // Open and lock the data directory
        dataDirectory = FSDirectory.open(fileSet.dataDirectory.toPath());

        // Configure and open the index writer; wrap the default deletion policy in a
        // SnapshotDeletionPolicy so commits can be pinned (e.g. for backups).
        IndexWriterConfig indexWriterConfig = new IndexWriterConfig(indexAnalyzer);
        if (settings != null && settings.similarity_class != null)
            indexWriterConfig.setSimilarity(IndexUtils.findSimilarity(settings.similarity_class));
        indexWriterConfig.setOpenMode(IndexWriterConfig.OpenMode.CREATE_OR_APPEND);
        SnapshotDeletionPolicy snapshotDeletionPolicy = new SnapshotDeletionPolicy(
                indexWriterConfig.getIndexDeletionPolicy());
        indexWriterConfig.setIndexDeletionPolicy(snapshotDeletionPolicy);
        indexWriter = new IndexWriter(dataDirectory, indexWriterConfig);
        if (indexWriter.hasUncommittedChanges())
            indexWriter.commit();

        // Finally we build the SearchSearcherManger
        SearcherManager searcherManager = new SearcherManager(indexWriter, true, null);

        return new IndexInstance(schema, dataDirectory, settings, analyzerMap, fieldMap, fileSet, indexWriter,
                searcherManager, queryAnalyzer);
    } catch (IOException | ServerException | ReflectiveOperationException | InterruptedException e) {
        // We failed in opening the index. We close everything we can
        // (reverse order of acquisition; closeQuietly suppresses secondary failures).
        if (queryAnalyzer != null)
            IOUtils.closeQuietly(queryAnalyzer);
        if (indexAnalyzer != null)
            IOUtils.closeQuietly(indexAnalyzer);
        if (indexWriter != null)
            IOUtils.closeQuietly(indexWriter);
        if (dataDirectory != null)
            IOUtils.closeQuietly(dataDirectory);
        throw e;
    }
}

From source file:com.vmware.dcp.services.common.LuceneDocumentIndexService.java

License:Open Source License

/**
 * Opens (upgrading in place if requested) the memory-mapped Lucene index under
 * the given directory and installs it as this service's writer.
 *
 * @param directory filesystem location of the index
 * @param doUpgrade when {@code true} and an index already exists, upgrade it first
 * @return the newly created writer (also stored in {@code this.writer})
 * @throws Exception if the index cannot be opened or committed
 */
public IndexWriter createWriter(File directory, boolean doUpgrade) throws Exception {
    Directory dir = MMapDirectory.open(directory.toPath());

    // Upgrade the index in place if requested and one is already present.
    if (doUpgrade && DirectoryReader.indexExists(dir)) {
        upgradeIndex(dir);
    }

    IndexWriterConfig iwc = new IndexWriterConfig(new SimpleAnalyzer());
    iwc.setOpenMode(OpenMode.CREATE_OR_APPEND);
    // Keep only the last commit, but allow commits to be snapshotted for backup.
    iwc.setIndexDeletionPolicy(new SnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy()));

    Long totalMBs = getHost().getServiceMemoryLimitMB(getSelfLink(), MemoryLimitType.EXACT);
    if (totalMBs != null) {
        // Give half the memory budget to the index buffer; the other half is
        // reserved for the service caching context.
        iwc.setRAMBufferSizeMB(Math.max(1, totalMBs / 2));
    }

    this.writer = new IndexWriter(dir, iwc);
    this.writer.commit();
    this.indexUpdateTimeMicros = Utils.getNowMicrosUtc();
    this.indexWriterCreationTimeMicros = this.indexUpdateTimeMicros;
    return this.writer;
}

From source file:com.vmware.xenon.services.common.LuceneDocumentIndexService.java

License:Open Source License

/**
 * Opens (upgrading in place if requested) the memory-mapped Lucene index under
 * the given directory and atomically installs it as this service's writer.
 *
 * @param directory filesystem location of the index
 * @param doUpgrade when {@code true} and an index already exists, upgrade it first
 * @return the newly installed writer (also stored in {@code this.writer})
 * @throws Exception if the index cannot be opened or committed
 */
public IndexWriter createWriter(File directory, boolean doUpgrade) throws Exception {
    Analyzer analyzer = new SimpleAnalyzer();
    IndexWriterConfig iwc = new IndexWriterConfig(analyzer);
    Long totalMBs = getHost().getServiceMemoryLimitMB(getSelfLink(), MemoryLimitType.EXACT);
    if (totalMBs != null) {
        // Split the memory budget: 3/4 to the index RAM buffer (minimum 1 MB),
        // the remaining 1/4 to link-access bookkeeping.
        long cacheSizeMB = (totalMBs * 3) / 4;
        cacheSizeMB = Math.max(1, cacheSizeMB);
        iwc.setRAMBufferSizeMB(cacheSizeMB);
        this.linkAccessMemoryLimitMB = totalMBs / 4;
    }

    Directory dir = MMapDirectory.open(directory.toPath());

    // Upgrade the index in place if necessary.
    if (doUpgrade && DirectoryReader.indexExists(dir)) {
        upgradeIndex(dir);
    }

    iwc.setOpenMode(OpenMode.CREATE_OR_APPEND);
    // Keep only the last commit, but allow commits to be snapshotted for backup.
    iwc.setIndexDeletionPolicy(new SnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy()));

    IndexWriter w = new IndexWriter(dir, iwc);
    w.commit();

    // Publish the new writer and reset bookkeeping under the search lock so
    // concurrent searchers observe a consistent writer/timestamp pair.
    synchronized (this.searchSync) {
        this.writer = w;
        this.linkAccessTimes.clear();
        this.indexUpdateTimeMicros = Utils.getNowMicrosUtc();
        this.indexWriterCreationTimeMicros = this.indexUpdateTimeMicros;
    }
    return this.writer;
}

From source file:io.datalayer.lucene.snapshot.IndexSnapshotTest.java

License:Apache License

/**
 * Demonstrates the snapshot/release cycle for hot backups: a SnapshotDeletionPolicy
 * wrapping the keep-last-commit policy pins the latest commit so its files can be
 * copied, then releases it.
 *
 * NOTE(review): kept @Ignore'd — {@code dir} is a null placeholder (opening the
 * writer would fail), and snapshot() on an index with no commits is expected to
 * fail as well. Illustrative code only; TODO confirm before enabling.
 */
@Test
@Ignore
public void testSnapshot() throws Exception {

    Directory dir = null;

    IndexDeletionPolicy policy = new KeepOnlyLastCommitDeletionPolicy();
    SnapshotDeletionPolicy snapshotter = new SnapshotDeletionPolicy(policy);
    IndexWriterConfig conf = new IndexWriterConfig(Version.LUCENE_44,
            AosAnalyser.NO_LIMIT_TOKEN_COUNT_SIMPLE_ANALYSER);
    conf.setIndexDeletionPolicy(snapshotter);

    IndexWriter writer = new IndexWriter(dir, conf);

    IndexCommit indexCommit = null;
    try {
        // The snapshot pins the commit so its files are not deleted while copying.
        indexCommit = snapshotter.snapshot();
        Collection<String> fileNames = indexCommit.getFileNames();
        /* <iterate over & copy files from fileNames> */
    } finally {
        // Only release when a snapshot was actually taken.
        if (indexCommit != null) {
            snapshotter.release(indexCommit);
        }
    }

    writer.close();

}

From source file:org.elasticsearch.index.shard.RefreshListenersTests.java

License:Apache License

/**
 * Builds the RefreshListeners under test plus the full InternalEngine they are
 * attached to (store, translog, merge policy, snapshot deletion policy, etc.),
 * since the engine is constructed for real rather than mocked.
 */
@Before
public void setupListeners() throws Exception {
    // Setup dependencies of the listeners
    maxListeners = randomIntBetween(1, 1000);
    listeners = new RefreshListeners(() -> maxListeners, () -> engine.refresh("too-many-listeners"),
            // Immediately run listeners rather than adding them to the listener thread pool like IndexShard does to simplify the test.
            Runnable::run, logger);

    // Now setup the InternalEngine which is much more complicated because we aren't mocking anything
    threadPool = new TestThreadPool(getTestName());
    IndexSettings indexSettings = IndexSettingsModule.newIndexSettings("index", Settings.EMPTY);
    ShardId shardId = new ShardId(new Index("index", "_na_"), 1);
    Directory directory = newDirectory();
    // Directory service that always hands back the single test directory above.
    DirectoryService directoryService = new DirectoryService(shardId, indexSettings) {
        @Override
        public Directory newDirectory() throws IOException {
            return directory;
        }

        @Override
        public long throttleTimeInNanos() {
            return 0;
        }
    };
    store = new Store(shardId, indexSettings, directoryService, new DummyShardLock(shardId));
    IndexWriterConfig iwc = newIndexWriterConfig();
    TranslogConfig translogConfig = new TranslogConfig(shardId, createTempDir("translog"), indexSettings,
            BigArrays.NON_RECYCLING_INSTANCE);
    Engine.EventListener eventListener = new Engine.EventListener() {
        @Override
        public void onFailedEngine(String reason, @Nullable Exception e) {
            // we don't need to notify anybody in this test
        }
    };
    // Fresh index + translog; snapshot-capable deletion policy wraps keep-last-commit.
    EngineConfig config = new EngineConfig(EngineConfig.OpenMode.CREATE_INDEX_AND_TRANSLOG, shardId, threadPool,
            indexSettings, null, store, new SnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy()),
            newMergePolicy(), iwc.getAnalyzer(), iwc.getSimilarity(), new CodecService(null, logger),
            eventListener, new TranslogHandler(shardId.getIndexName(), logger),
            IndexSearcher.getDefaultQueryCache(), IndexSearcher.getDefaultQueryCachingPolicy(), translogConfig,
            TimeValue.timeValueMinutes(5), listeners);
    engine = new InternalEngine(config);
}

From source file:org.ojbc.adapters.analyticaldatastore.personid.IndexedIdentifierGenerationStrategy.java

License:RPL License

/**
 * Opens the Lucene index used for identifier generation, creating it on first use.
 *
 * @throws Exception if the index directory or writer cannot be opened
 */
private void init() throws Exception {
    Directory indexDirectory = FSDirectory.open(new File(indexDirectoryPath));
    log.info("Set Lucene index directory to " + indexDirectory.toString());

    IndexWriterConfig config =
            new IndexWriterConfig(Version.LUCENE_47, new StandardAnalyzer(Version.LUCENE_47));
    config.setOpenMode(IndexWriterConfig.OpenMode.CREATE_OR_APPEND);
    // Keep only the last commit, but allow commits to be snapshotted for backup.
    config.setIndexDeletionPolicy(new SnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy()));

    indexWriter = new IndexWriter(indexDirectory, config);
}