Example usage for org.apache.lucene.index KeepOnlyLastCommitDeletionPolicy KeepOnlyLastCommitDeletionPolicy

List of usage examples for org.apache.lucene.index KeepOnlyLastCommitDeletionPolicy KeepOnlyLastCommitDeletionPolicy

Introduction

On this page you can find an example usage for org.apache.lucene.index KeepOnlyLastCommitDeletionPolicy KeepOnlyLastCommitDeletionPolicy.

Prototype

public KeepOnlyLastCommitDeletionPolicy() 

Source Link

Document

Sole constructor.

Usage

From source file:com.leavesfly.lia.admin.Fragments.java

License:Apache License

public void test() throws Exception {
    // Fragment: dir/analyzer are placeholders supplied by the real caller.
    Directory dir = null;
    Analyzer analyzer = null;
    // START
    // Wrap the default keep-only-last-commit policy so a commit can be
    // "snapshotted" (protected from deletion) while its files are copied.
    IndexDeletionPolicy policy = new KeepOnlyLastCommitDeletionPolicy();
    SnapshotDeletionPolicy snapshotter = new SnapshotDeletionPolicy(policy);
    IndexWriter writer = new IndexWriter(dir, analyzer, snapshotter, IndexWriter.MaxFieldLength.UNLIMITED);
    // END

    try {
        try {
            IndexCommit commit = (IndexCommit) snapshotter.snapshot();
            Collection<String> fileNames = commit.getFileNames();
            /*<iterate over & copy files from fileNames>*/
        } finally {
            // Always release the snapshot so the policy may delete its files.
            snapshotter.release();
        }
    } finally {
        // fix: the writer was never closed, leaking the index write lock.
        writer.close();
    }
}

From source file:com.mathworks.xzheng.admin.Fragments.java

License:Apache License

public void test() throws Exception {
    // Fragment: dir/analyzer are placeholders supplied by the real caller.
    Directory dir = null;
    Analyzer analyzer = null;
    // START
    // Wrap the default keep-only-last-commit policy so a commit can be
    // "snapshotted" (protected from deletion) while its files are copied.
    IndexDeletionPolicy policy = new KeepOnlyLastCommitDeletionPolicy();
    SnapshotDeletionPolicy snapshotter = new SnapshotDeletionPolicy(policy);

    IndexWriterConfig config = new IndexWriterConfig(Version.LUCENE_46, analyzer);
    config.setIndexDeletionPolicy(snapshotter);
    IndexWriter writer = new IndexWriter(dir, config);
    // END

    IndexCommit commit = null;
    try {
        try {
            commit = (IndexCommit) snapshotter.snapshot();
            Collection<String> fileNames = commit.getFileNames();
            /*<iterate over & copy files from fileNames>*/
        } finally {
            // fix: only release when a snapshot was actually taken; the old
            // code could call release(null) if snapshot() threw.
            if (commit != null) {
                snapshotter.release(commit);
            }
        }
    } finally {
        // fix: the writer was never closed, leaking the index write lock.
        writer.close();
    }
}

From source file:com.nearinfinity.blur.thrift.ThriftBlurShardServer.java

License:Apache License

public static ThriftServer createServer(int serverIndex, BlurConfiguration configuration) throws Exception {
    // Wires together the complete shard-server stack: block cache, ZooKeeper
    // session, cluster status, index server/manager, Thrift endpoint and
    // (optionally) the embedded GUI HTTP server. Statement order matters:
    // each component is fully configured and init()'d before its consumers.

    // setup block cache
    // 134,217,728 is the slab size, therefore there are 16,384 blocks
    // in a slab when using a block size of 8,192
    int numberOfBlocksPerSlab = 16384;
    int blockSize = BlockDirectory.BLOCK_SIZE;
    int slabCount = configuration.getInt(BLUR_SHARD_BLOCKCACHE_SLAB_COUNT, 1);
    Cache cache;
    Configuration config = new Configuration();
    BlurMetrics blurMetrics = new BlurMetrics(config);
    if (slabCount >= 1) {
        BlockCache blockCache;
        boolean directAllocation = configuration.getBoolean(BLUR_SHARD_BLOCKCACHE_DIRECT_MEMORY_ALLOCATION,
                true);

        int slabSize = numberOfBlocksPerSlab * blockSize;
        LOG.info("Number of slabs of block cache [{0}] with direct memory allocation set to [{1}]", slabCount,
                directAllocation);
        LOG.info(
                "Block cache target memory usage, slab size of [{0}] will allocate [{1}] slabs and use ~[{2}] bytes",
                slabSize, slabCount, ((long) slabCount * (long) slabSize));

        BufferStore.init(configuration, blurMetrics);

        try {
            // Widen to long before multiplying to avoid int overflow.
            long totalMemory = (long) slabCount * (long) numberOfBlocksPerSlab * (long) blockSize;
            blockCache = new BlockCache(blurMetrics, directAllocation, totalMemory, slabSize, blockSize);
        } catch (OutOfMemoryError e) {
            // Direct-buffer exhaustion gets a targeted configuration hint
            // before exiting; any other OOM is rethrown.
            if ("Direct buffer memory".equals(e.getMessage())) {
                System.err.println(
                        "The max direct memory is too low.  Either increase by setting (-XX:MaxDirectMemorySize=<size>g -XX:+UseLargePages) or disable direct allocation by (blur.shard.blockcache.direct.memory.allocation=false) in blur-site.properties");
                System.exit(1);
            }
            throw e;
        }
        cache = new BlockDirectoryCache(blockCache, blurMetrics);
    } else {
        // slabCount < 1 disables the block cache entirely.
        cache = BlockDirectory.NO_CACHE;
    }

    String bindAddress = configuration.get(BLUR_SHARD_BIND_ADDRESS);
    int bindPort = configuration.getInt(BLUR_SHARD_BIND_PORT, -1);
    // Each server instance on the same host is offset by its index.
    bindPort += serverIndex;

    LOG.info("Shard Server using index [{0}] bind address [{1}]", serverIndex, bindAddress + ":" + bindPort);

    String nodeNameHostName = getNodeName(configuration, BLUR_SHARD_HOSTNAME);
    String nodeName = nodeNameHostName + ":" + bindPort;
    String zkConnectionStr = isEmpty(configuration.get(BLUR_ZOOKEEPER_CONNECTION), BLUR_ZOOKEEPER_CONNECTION);

    BlurQueryChecker queryChecker = new BlurQueryChecker(configuration);

    // Fail fast if ZooKeeper is unreachable or the clocks disagree beyond
    // the configured tolerance.
    final ZooKeeper zooKeeper = ZkUtils.newZooKeeper(zkConnectionStr);
    try {
        ZookeeperSystemTime.checkSystemTime(zooKeeper,
                configuration.getLong(BLUR_ZOOKEEPER_SYSTEM_TIME_TOLERANCE, 3000));
    } catch (KeeperException e) {
        if (e.code() == Code.CONNECTIONLOSS) {
            System.err.println("Cannot connect zookeeper to [" + zkConnectionStr + "]");
            System.exit(1);
        }
    }

    BlurUtil.setupZookeeper(zooKeeper, configuration.get(BLUR_CLUSTER_NAME));

    final ZookeeperClusterStatus clusterStatus = new ZookeeperClusterStatus(zooKeeper);

    final BlurIndexRefresher refresher = new BlurIndexRefresher();
    refresher.init();

    BlurFilterCache filterCache = getFilterCache(configuration);
    BlurIndexWarmup indexWarmup = getIndexWarmup(configuration);
    IndexDeletionPolicy indexDeletionPolicy = new KeepOnlyLastCommitDeletionPolicy();

    // Index server: owns shard opening, refresh/commit scheduling and caching.
    final DistributedIndexServer indexServer = new DistributedIndexServer();
    indexServer.setBlurMetrics(blurMetrics);
    indexServer.setCache(cache);
    indexServer.setClusterStatus(clusterStatus);
    indexServer.setConfiguration(config);
    indexServer.setNodeName(nodeName);
    indexServer.setRefresher(refresher);
    indexServer.setShardOpenerThreadCount(configuration.getInt(BLUR_SHARD_OPENER_THREAD_COUNT, 16));
    indexServer.setZookeeper(zooKeeper);
    indexServer.setFilterCache(filterCache);
    indexServer.setSafeModeDelay(configuration.getLong(BLUR_SHARD_SAFEMODEDELAY, 60000));
    indexServer.setWarmup(indexWarmup);
    indexServer.setIndexDeletionPolicy(indexDeletionPolicy);
    indexServer.setTimeBetweenCommits(configuration.getLong(BLUR_SHARD_TIME_BETWEEN_COMMITS, 60000));
    indexServer.setTimeBetweenRefreshs(configuration.getLong(BLUR_SHARD_TIME_BETWEEN_REFRESHS, 500));
    indexServer.init();

    // Index manager: query execution on top of the index server.
    final IndexManager indexManager = new IndexManager();
    indexManager.setIndexServer(indexServer);
    indexManager.setMaxClauseCount(configuration.getInt(BLUR_MAX_CLAUSE_COUNT, 1024));
    indexManager.setThreadCount(configuration.getInt(BLUR_INDEXMANAGER_SEARCH_THREAD_COUNT, 32));
    indexManager.setBlurMetrics(blurMetrics);
    indexManager.setFilterCache(filterCache);
    indexManager.init();

    // Thrift-facing shard server delegating to the manager/server above.
    final BlurShardServer shardServer = new BlurShardServer();
    shardServer.setIndexServer(indexServer);
    shardServer.setIndexManager(indexManager);
    shardServer.setZookeeper(zooKeeper);
    shardServer.setClusterStatus(clusterStatus);
    shardServer.setDataFetchThreadCount(configuration.getInt(BLUR_SHARD_DATA_FETCH_THREAD_COUNT, 8));
    shardServer.setMaxQueryCacheElements(configuration.getInt(BLUR_SHARD_CACHE_MAX_QUERYCACHE_ELEMENTS, 128));
    shardServer.setMaxTimeToLive(
            configuration.getLong(BLUR_SHARD_CACHE_MAX_TIMETOLIVE, TimeUnit.MINUTES.toMillis(1)));
    shardServer.setQueryChecker(queryChecker);
    shardServer.init();

    // Proxy that records per-method call counts/latencies into the metrics.
    Iface iface = BlurUtil.recordMethodCallsAndAverageTimes(blurMetrics, shardServer, Iface.class);

    int threadCount = configuration.getInt(BLUR_SHARD_SERVER_THRIFT_THREAD_COUNT, 32);

    final ThriftBlurShardServer server = new ThriftBlurShardServer();
    server.setNodeName(nodeName);
    server.setBindAddress(bindAddress);
    server.setBindPort(bindPort);
    server.setThreadCount(threadCount);
    server.setIface(iface);
    server.setConfiguration(configuration);

    // Optional embedded GUI; a non-positive base port disables it.
    int baseGuiPort = Integer.parseInt(configuration.get(BLUR_GUI_SHARD_PORT));
    final HttpJettyServer httpServer;
    if (baseGuiPort > 0) {
        int webServerPort = baseGuiPort + serverIndex;

        // TODO: this got ugly, there has to be a better way to handle all these
        // params
        // without reversing the mvn dependancy and making blur-gui on top.
        httpServer = new HttpJettyServer(bindPort, webServerPort,
                configuration.getInt(BLUR_CONTROLLER_BIND_PORT, -1),
                configuration.getInt(BLUR_SHARD_BIND_PORT, -1),
                configuration.getInt(BLUR_GUI_CONTROLLER_PORT, -1),
                configuration.getInt(BLUR_GUI_SHARD_PORT, -1), "shard", blurMetrics);
    } else {
        httpServer = null;
    }

    // This will shutdown the server when the correct path is set in zk
    BlurShutdown shutdown = new BlurShutdown() {
        @Override
        public void shutdown() {
            ThreadWatcher threadWatcher = ThreadWatcher.instance();
            quietClose(refresher, server, shardServer, indexManager, indexServer, threadWatcher, clusterStatus,
                    zooKeeper, httpServer);
        }
    };
    server.setShutdown(shutdown);
    new BlurServerShutDown().register(shutdown, zooKeeper);
    return server;
}

From source file:com.sensei.indexing.hadoop.keyvalueformat.IntermediateForm.java

License:Apache License

private IndexWriter createWriter() throws IOException {
    // Opens an IndexWriter on `dir` with the default keep-only-last-commit
    // deletion policy. The analyzer is null — presumably documents arrive
    // pre-analyzed / via addIndexes; TODO confirm against callers.
    IndexWriter writer =
            //        new IndexWriter(dir, false, null, new KeepOnlyLastCommitDeletionPolicy());
            new IndexWriter(dir, null, new KeepOnlyLastCommitDeletionPolicy(), MaxFieldLength.UNLIMITED);
    writer.setUseCompoundFile(true); // use compound file format to speed up

    // Honor an optional per-job cap on indexed field length.
    if (conf != null) {
        int maxFieldLength = conf.getInt(SenseiJobConfig.MAX_FIELD_LENGTH, -1);
        if (maxFieldLength > 0) {
            writer.setMaxFieldLength(maxFieldLength);
        }
    }

    return writer;
}

From source file:com.sensei.indexing.hadoop.reduce.ShardWriter.java

License:Apache License

/**
 * Constructor.
 * Clears stale local temp data, (re)creates the permanent shard directory,
 * then opens an IndexWriter on a local temp directory with the
 * keep-only-last-commit deletion policy.
 * @param fs file system holding the permanent shard directory
 * @param shard shard descriptor (directory + generation)
 * @param tempDir local scratch directory for index building
 * @param iconf job configuration
 * @throws IOException on file-system failures
 */
public ShardWriter(FileSystem fs, Shard shard, String tempDir, Configuration iconf) throws IOException {
    logger.info("Construct a shard writer");

    this.iconf = iconf;
    this.fs = fs;
    localFs = FileSystem.getLocal(iconf);
    perm = new Path(shard.getDirectory());
    temp = new Path(tempDir);

    long initGeneration = shard.getGeneration();

    // Remove leftover local temp data from a previous run.
    if (localFs.exists(temp)) {
        File tempFile = new File(temp.getName());
        if (tempFile.exists())
            SenseiReducer.deleteDir(tempFile);
    }

    if (!fs.exists(perm)) {
        // A missing permanent directory implies no prior generation.
        assert (initGeneration < 0);
        fs.mkdirs(perm);
    } else {
        // Existing shard directory is moved to trash and recreated empty.
        moveToTrash(iconf, perm);
        fs.mkdirs(perm);
        //      restoreGeneration(fs, perm, initGeneration);
    }
    //    dir =  //new FileSystemDirectory(fs, perm, false, iconf.getConfiguration());
    //        new MixedDirectory(fs, perm, localFs, fs.startLocalOutput(perm, temp),
    //            iconf);

    // analyzer is null because we only use addIndexes, not addDocument
    //    writer =
    //        new IndexWriter(dir, null, 
    //              initGeneration < 0 ? new KeepOnlyLastCommitDeletionPolicy() : new MixedDeletionPolicy(), 
    //                    MaxFieldLength.UNLIMITED);

    //    writer =  new IndexWriter(dir, null, new KeepOnlyLastCommitDeletionPolicy(), MaxFieldLength.UNLIMITED);
    writer = new IndexWriter(FSDirectory.open(new File(tempDir)), null, new KeepOnlyLastCommitDeletionPolicy(),
            MaxFieldLength.UNLIMITED);
    setParameters(iconf);
    //    dir = null;
    //    writer = null;

}

From source file:com.vmware.dcp.services.common.LuceneDocumentIndexService.java

License:Open Source License

public IndexWriter createWriter(File directory, boolean doUpgrade) throws Exception {
    // Memory-map the index directory; SimpleAnalyzer tokenizes on letters.
    Directory mmapDir = MMapDirectory.open(directory.toPath());
    Analyzer simpleAnalyzer = new SimpleAnalyzer();

    // Upgrade the index in place if necessary.
    if (doUpgrade && DirectoryReader.indexExists(mmapDir)) {
        upgradeIndex(mmapDir);
    }

    IndexWriterConfig writerConfig = new IndexWriterConfig(simpleAnalyzer);
    writerConfig.setOpenMode(OpenMode.CREATE_OR_APPEND);
    // Snapshotting wrapper lets callers pin a commit while copying its files.
    writerConfig.setIndexDeletionPolicy(new SnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy()));

    Long totalMBs = getHost().getServiceMemoryLimitMB(getSelfLink(), MemoryLimitType.EXACT);
    if (totalMBs != null) {
        // give half to the index, the other half we keep for service caching context
        long indexBudgetMB = Math.max(1, totalMBs / 2);
        writerConfig.setRAMBufferSizeMB(indexBudgetMB);
    }

    this.writer = new IndexWriter(mmapDir, writerConfig);
    this.writer.commit();
    this.indexUpdateTimeMicros = Utils.getNowMicrosUtc();
    this.indexWriterCreationTimeMicros = this.indexUpdateTimeMicros;
    return this.writer;
}

From source file:com.vmware.xenon.services.common.LuceneDocumentIndexService.java

License:Open Source License

public IndexWriter createWriter(File directory, boolean doUpgrade) throws Exception {
    IndexWriterConfig writerConfig = new IndexWriterConfig(new SimpleAnalyzer());

    // Split the service memory budget: 3/4 to the Lucene RAM buffer,
    // the remaining 1/4 to the link-access cache.
    Long totalMBs = getHost().getServiceMemoryLimitMB(getSelfLink(), MemoryLimitType.EXACT);
    if (totalMBs != null) {
        long ramBufferMB = Math.max(1, (totalMBs * 3) / 4);
        writerConfig.setRAMBufferSizeMB(ramBufferMB);
        this.linkAccessMemoryLimitMB = totalMBs / 4;
    }

    Directory indexDir = MMapDirectory.open(directory.toPath());

    // Upgrade the index in place if necessary.
    if (doUpgrade && DirectoryReader.indexExists(indexDir)) {
        upgradeIndex(indexDir);
    }

    writerConfig.setOpenMode(OpenMode.CREATE_OR_APPEND);
    // Snapshotting wrapper lets callers pin a commit while copying its files.
    writerConfig.setIndexDeletionPolicy(new SnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy()));

    IndexWriter freshWriter = new IndexWriter(indexDir, writerConfig);
    freshWriter.commit();

    // Publish the new writer and reset bookkeeping under the search lock.
    synchronized (this.searchSync) {
        this.writer = freshWriter;
        this.linkAccessTimes.clear();
        this.indexUpdateTimeMicros = Utils.getNowMicrosUtc();
        this.indexWriterCreationTimeMicros = this.indexUpdateTimeMicros;
    }
    return this.writer;
}

From source file:com.xiaomi.linden.hadoop.indexing.keyvalueformat.IntermediateForm.java

License:Apache License

private void createWriter() throws IOException {
    // Opens the main index writer (null analyzer) plus the taxonomy writer,
    // keeping only the most recent commit in the index.
    IndexWriterConfig writerConfig = new IndexWriterConfig(Version.LATEST, null);
    writerConfig.setIndexDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy());
    writer = new IndexWriter(dir, writerConfig);
    taxoWriter = new DirectoryTaxonomyWriter(taxoDir);
}

From source file:com.xiaomi.linden.hadoop.indexing.reduce.ShardWriter.java

License:Apache License

/**
 * Constructor.
 * Clears stale local temp data, (re)creates the permanent shard and taxonomy
 * directories, and opens index/taxonomy writers on local temp paths.
 * @param fs file system holding the permanent shard directory
 * @param shard shard descriptor
 * @param tempDir local scratch directory for index building
 * @param conf job configuration
 * @throws IOException on file-system failures
 */
public ShardWriter(FileSystem fs, Shard shard, String tempDir, Configuration conf) throws IOException {
    logger.info("Construct a shard writer");

    this.conf = conf;
    this.fs = fs;
    localFs = FileSystem.getLocal(conf);
    perm = new Path(shard.getDirectory());
    taxoPerm = new Path(shard.getDirectory() + ".taxonomy");
    String indexDir = tempDir + "/" + "index";
    String taxoDir = tempDir + "/" + "taxo";
    temp = new Path(indexDir);
    taxoTemp = new Path(taxoDir);

    // Remove leftover local temp data from a previous run.
    if (localFs.exists(temp)) {
        File staleTemp = new File(temp.getName());
        if (staleTemp.exists()) {
            LindenReducer.deleteDir(staleTemp);
        }
    }

    // Trash any existing permanent directories, then recreate them empty.
    if (fs.exists(perm)) {
        moveToTrash(conf, perm);
    }
    fs.mkdirs(perm);

    if (fs.exists(taxoPerm)) {
        moveToTrash(conf, taxoPerm);
    }
    fs.mkdirs(taxoPerm);

    // Build locally (null analyzer) keeping only the most recent commit.
    IndexWriterConfig writerConfig = new IndexWriterConfig(Version.LATEST, null);
    writerConfig.setIndexDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy());
    writer = new IndexWriter(FSDirectory.open(new File(indexDir)), writerConfig);
    taxoWriter = new DirectoryTaxonomyWriter(FSDirectory.open(new File(taxoDir)));
}

From source file:gov.nasa.ensemble.core.plan.editor.search.PlanIndexer.java

License:Open Source License

/**
 * Constructor for PlanIndexer.
 * Sets up the attribute lists and an in-memory index with a writer that
 * keeps only the last commit.
 * @throws IllegalStateException if the IndexWriter cannot be opened
 */
public PlanIndexer() {
    booleanAttributes = new Vector<String>();
    attributes = new Vector<String>();
    dir = new RAMDirectory();

    try {
        writer = new IndexWriter(dir, analyzer, true, new KeepOnlyLastCommitDeletionPolicy(),
                new MaxFieldLength(IndexWriter.DEFAULT_MAX_FIELD_LENGTH));
    } catch (IOException e) {
        // fix: the old code only printed to stdout and swallowed the error,
        // leaving `writer` null and deferring an NPE to the first index
        // operation. Fail fast instead, preserving the cause.
        throw new IllegalStateException("IOException in opening IndexWriter: " + e.getMessage(), e);
    }
}