Example usage for org.apache.lucene.index LogByteSizeMergePolicy DEFAULT_MIN_MERGE_MB

List of usage examples for org.apache.lucene.index LogByteSizeMergePolicy DEFAULT_MIN_MERGE_MB

Introduction

In this page you can find the example usage for org.apache.lucene.index LogByteSizeMergePolicy DEFAULT_MIN_MERGE_MB.

Prototype

double DEFAULT_MIN_MERGE_MB

To view the source code for org.apache.lucene.index LogByteSizeMergePolicy DEFAULT_MIN_MERGE_MB, click the Source Link below.

Click Source Link

Document

Default minimum segment size.

Usage

From source file:org.compass.core.lucene.engine.merge.policy.LogByteSizeMergePolicyProvider.java

License:Apache License

/**
 * Creates a {@link LogByteSizeMergePolicy}, reading the maximum and minimum
 * merge sizes (in MB) from the Compass settings and falling back to Lucene's
 * built-in defaults when a setting is absent.
 *
 * @param settings the Compass settings to read merge thresholds from
 * @return a configured {@link LogByteSizeMergePolicy}
 * @throws SearchEngineException declared by the provider contract
 */
public MergePolicy create(CompassSettings settings) throws SearchEngineException {
    double maxMergeMb = settings.getSettingAsDouble(
            LuceneEnvironment.MergePolicy.LogByteSize.MAX_MERGE_MB,
            LogByteSizeMergePolicy.DEFAULT_MAX_MERGE_MB);
    double minMergeMb = settings.getSettingAsDouble(
            LuceneEnvironment.MergePolicy.LogByteSize.MIN_MERGE_MB,
            LogByteSizeMergePolicy.DEFAULT_MIN_MERGE_MB);

    LogByteSizeMergePolicy mergePolicy = new LogByteSizeMergePolicy();
    mergePolicy.setMaxMergeMB(maxMergeMb);
    mergePolicy.setMinMergeMB(minMergeMb);
    return mergePolicy;
}

From source file:org.elasticsearch.index.engine.internal.InternalEngineMergeTests.java

License:Apache License

/**
 * Indexes several rounds of bulk documents into a small index configured for
 * aggressive merging, then waits until background merges have reduced the
 * total segment count below an upper bound (2 * shards * 10).
 */
@Test
@Slow
public void testMergesHappening() throws InterruptedException, IOException, ExecutionException {
    final int numOfShards = randomIntBetween(1, 5);
    // Settings tuned to keep the number of segments low so merges actually fire.
    // BUG FIX: the last setting previously used LogByteSizeMergePolicy.DEFAULT_MIN_MERGE_MB
    // (a double constant, not a settings key) as the key, so the intended min-merge-size
    // override was silently stored under a meaningless key and never applied. Use the
    // real setting key instead; 512kb preserves the intended ~0.5 MB threshold.
    assertAcked(prepareCreate("test")
            .setSettings(ImmutableSettings.builder().put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, numOfShards)
                    .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
                    .put(LogDocMergePolicyProvider.MIN_MERGE_DOCS_KEY, 10)
                    .put(LogDocMergePolicyProvider.MERGE_FACTORY_KEY, 5)
                    .put("index.merge.policy.min_merge_size", "512kb").build()));
    long id = 0;
    final int rounds = scaledRandomIntBetween(50, 300);
    logger.info("Starting rounds [{}] ", rounds);
    for (int i = 0; i < rounds; ++i) {
        // Each round bulk-indexes a batch of trivial documents to create new segments.
        final int numDocs = scaledRandomIntBetween(100, 1000);
        BulkRequestBuilder request = client().prepareBulk();
        for (int j = 0; j < numDocs; ++j) {
            request.add(Requests.indexRequest("test").type("type1").id(Long.toString(id++))
                    .source(jsonBuilder().startObject().field("l", randomLong()).endObject()));
        }
        BulkResponse response = request.execute().actionGet();
        refresh();
        assertNoFailures(response);
        IndicesStatsResponse stats = client().admin().indices().prepareStats("test").setSegments(true)
                .setMerge(true).get();
        logger.info("index round [{}] - segments {}, total merges {}, current merge {}", i,
                stats.getPrimaries().getSegments().getCount(), stats.getPrimaries().getMerge().getTotal(),
                stats.getPrimaries().getMerge().getCurrent());
    }
    final long upperNumberSegments = 2 * numOfShards * 10;
    // Poll until all in-flight merges are done AND the segment count has dropped
    // below the bound; only then is the final assertion meaningful.
    awaitBusy(new Predicate<Object>() {
        @Override
        public boolean apply(Object input) {
            IndicesStatsResponse stats = client().admin().indices().prepareStats().setSegments(true)
                    .setMerge(true).get();
            logger.info("numshards {}, segments {}, total merges {}, current merge {}", numOfShards,
                    stats.getPrimaries().getSegments().getCount(), stats.getPrimaries().getMerge().getTotal(),
                    stats.getPrimaries().getMerge().getCurrent());
            long current = stats.getPrimaries().getMerge().getCurrent();
            long count = stats.getPrimaries().getSegments().getCount();
            return count < upperNumberSegments && current == 0;
        }
    });
    IndicesStatsResponse stats = client().admin().indices().prepareStats().setSegments(true).setMerge(true)
            .get();
    logger.info("numshards {}, segments {}, total merges {}, current merge {}", numOfShards,
            stats.getPrimaries().getSegments().getCount(), stats.getPrimaries().getMerge().getTotal(),
            stats.getPrimaries().getMerge().getCurrent());
    long count = stats.getPrimaries().getSegments().getCount();
    assertThat(count, Matchers.lessThanOrEqualTo(upperNumberSegments));
}

From source file:org.elasticsearch.index.merge.policy.BalancedSegmentMergePolicyProvider.java

License:Apache License

@Inject
public BalancedSegmentMergePolicyProvider(Store store) {
    super(store.shardId(), store.indexSettings());
    Preconditions.checkNotNull(store, "Store must be provided to merge policy");

    this.minMergeSize = componentSettings.getAsBytesSize("min_merge_size", new ByteSizeValue(
            (long) LogByteSizeMergePolicy.DEFAULT_MIN_MERGE_MB * 1024 * 1024, ByteSizeUnit.BYTES));
    this.maxMergeSize = componentSettings.getAsBytesSize("max_merge_size",
            new ByteSizeValue((long) LogByteSizeMergePolicy.DEFAULT_MAX_MERGE_MB, ByteSizeUnit.MB));
    this.mergeFactor = componentSettings.getAsInt("merge_factor", LogByteSizeMergePolicy.DEFAULT_MERGE_FACTOR);
    this.maxMergeDocs = componentSettings.getAsInt("max_merge_docs",
            LogByteSizeMergePolicy.DEFAULT_MAX_MERGE_DOCS);
    this.numLargeSegments = componentSettings.getAsInt("num_large_segments",
            BalancedSegmentMergePolicy.DEFAULT_NUM_LARGE_SEGMENTS);
    this.maxSmallSegments = componentSettings.getAsInt("max_small_segments",
            2 * LogMergePolicy.DEFAULT_MERGE_FACTOR);

    logger.debug(//from  www  . j a va 2  s  . co m
            "Using [balanced] merge policy with merge_factor[{}], min_merge_size[{}], max_merge_size[{}], max_merge_docs[{}]",
            mergeFactor, minMergeSize, maxMergeSize, maxMergeDocs);
}

From source file:org.elasticsearch.index.merge.policy.LogByteSizeMergePolicyProvider.java

License:Apache License

@Inject
public LogByteSizeMergePolicyProvider(Store store, IndexSettingsService indexSettingsService) {
    super(store);
    Preconditions.checkNotNull(store, "Store must be provided to merge policy");
    this.indexSettingsService = indexSettingsService;
    this.minMergeSize = componentSettings.getAsBytesSize("min_merge_size", new ByteSizeValue(
            (long) (LogByteSizeMergePolicy.DEFAULT_MIN_MERGE_MB * 1024 * 1024), ByteSizeUnit.BYTES));
    this.maxMergeSize = componentSettings.getAsBytesSize("max_merge_size",
            new ByteSizeValue((long) LogByteSizeMergePolicy.DEFAULT_MAX_MERGE_MB, ByteSizeUnit.MB));
    this.mergeFactor = componentSettings.getAsInt("merge_factor", LogByteSizeMergePolicy.DEFAULT_MERGE_FACTOR);
    this.maxMergeDocs = componentSettings.getAsInt("max_merge_docs",
            LogByteSizeMergePolicy.DEFAULT_MAX_MERGE_DOCS);
    this.calibrateSizeByDeletes = componentSettings.getAsBoolean("calibrate_size_by_deletes", true);
    this.asyncMerge = indexSettings.getAsBoolean("index.merge.async", true);
    logger.debug(//from  w  w  w. j a  v a2s . c  o  m
            "using [log_bytes_size] merge policy with merge_factor[{}], min_merge_size[{}], max_merge_size[{}], max_merge_docs[{}], calibrate_size_by_deletes[{}], async_merge[{}]",
            mergeFactor, minMergeSize, maxMergeSize, maxMergeDocs, calibrateSizeByDeletes, asyncMerge);

    indexSettingsService.addListener(applySettings);
}