Example usage for org.apache.lucene.util Accountable ramBytesUsed

List of usage examples for org.apache.lucene.util Accountable ramBytesUsed

Introduction

On this page you can find example usage of org.apache.lucene.util Accountable ramBytesUsed.

Prototype

long ramBytesUsed();

Source Link

Document

Return the memory usage of this object in bytes.

Usage

From source file: org.apache.solr.search.LRUCache.java

License: Apache License

/**
 * Inserts a mapping into the cache, maintaining the RAM-usage accounting
 * ({@code ramBytesUsed}) when the cache is configured with a byte limit
 * ({@code maxRamBytes != Long.MAX_VALUE}).
 *
 * <p>The new entry's size is added <em>before</em> the underlying
 * {@code map.put}, so that {@code removeEldestEntry} (triggered by the put)
 * compares against the up-to-date total. If the put replaced an existing
 * entry, the double-counted entry is backed out afterwards.
 *
 * @param key   cache key; sized via {@link Accountable} when it implements it,
 *              otherwise charged at {@code DEFAULT_RAM_BYTES_USED}
 * @param value cache value; in RAM-bounded mode it MUST implement
 *              {@link Accountable}, otherwise a SERVER_ERROR is thrown
 * @return the previous value mapped to {@code key}, or {@code null}
 */
@Override
public V put(K key, V value) {
    synchronized (map) {
        if (getState() == State.LIVE) {
            stats.inserts.increment();
        }

        // increment local inserts regardless of state???
        // it does make it more consistent with the current size...
        inserts++;

        // Important: add the new entry's RAM bytes first so that
        // removeEldestEntry (fired by map.put below) sees the correct total.
        long keySize = DEFAULT_RAM_BYTES_USED;
        if (maxRamBytes != Long.MAX_VALUE) {
            // instanceof is false for null, so no separate null check is needed
            if (key instanceof Accountable) {
                keySize = ((Accountable) key).ramBytesUsed();
            }
            long valueSize = 0;
            if (value != null) {
                if (value instanceof Accountable) {
                    valueSize = ((Accountable) value).ramBytesUsed();
                } else {
                    // RAM-bounded mode requires every value to report its own size.
                    throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
                            "Cache: " + getName() + " is configured with maxRamBytes="
                                    + RamUsageEstimator.humanReadableUnits(maxRamBytes)
                                    + " but its values do not implement org.apache.lucene.util.Accountable");
                }
            }
            ramBytesUsed += keySize + valueSize + LINKED_HASHTABLE_RAM_BYTES_PER_ENTRY;
        }
        V old = map.put(key, value);
        if (maxRamBytes != Long.MAX_VALUE && old != null) {
            // The key already existed: we added a full entry's worth above,
            // so back out the replaced value, the per-entry overhead, and the key.
            long bytesToDecrement = ((Accountable) old).ramBytesUsed();
            bytesToDecrement += LINKED_HASHTABLE_RAM_BYTES_PER_ENTRY;
            if (key instanceof Accountable) {
                bytesToDecrement += ((Accountable) key).ramBytesUsed();
            } else {
                // Mirrors the increment path exactly: non-Accountable keys —
                // INCLUDING null keys — were charged DEFAULT_RAM_BYTES_USED
                // above, so the same amount must be backed out here. (The
                // original skipped this for null keys, leaking
                // DEFAULT_RAM_BYTES_USED on every replacement of a null-key
                // entry.)
                bytesToDecrement += DEFAULT_RAM_BYTES_USED;
            }
            ramBytesUsed -= bytesToDecrement;
        }
        return old;
    }
}

From source file: org.elasticsearch.action.admin.indices.segments.IndicesSegmentResponse.java

License: Apache License

/**
 * Recursively renders an {@link Accountable} RAM-usage tree as XContent:
 * each node becomes an object with its description, size, and (when present)
 * a CHILDREN array of nested nodes.
 */
static void toXContent(XContentBuilder builder, Accountable tree) throws IOException {
    builder.startObject();
    builder.field(Fields.DESCRIPTION, tree.toString());
    builder.byteSizeField(Fields.SIZE_IN_BYTES, Fields.SIZE, new ByteSizeValue(tree.ramBytesUsed()));
    final Collection<Accountable> childResources = tree.getChildResources();
    if (!childResources.isEmpty()) {
        builder.startArray(Fields.CHILDREN);
        for (final Accountable childResource : childResources) {
            toXContent(builder, childResource);
        }
        builder.endArray();
    }
    builder.endObject();
}

From source file: org.elasticsearch.index.cache.bitset.BitSetFilterCacheTests.java

License: Apache License

/**
 * Verifies that the {@link BitsetFilterCache} listener fires exactly once on
 * cache load and once on eviction, and that the RAM reported by onCache is
 * fully released by onRemoval (net accounting returns to zero).
 */
public void testListener() throws IOException {
    // Build a one-document index so the cache has something to load.
    final IndexWriter indexWriter = new IndexWriter(new RAMDirectory(),
            new IndexWriterConfig(new StandardAnalyzer()).setMergePolicy(new LogByteSizeMergePolicy()));
    final Document doc = new Document();
    doc.add(new StringField("field", "value", Field.Store.NO));
    indexWriter.addDocument(doc);
    indexWriter.commit();
    final DirectoryReader writerReader = DirectoryReader.open(indexWriter, false);
    final IndexReader reader = ElasticsearchDirectoryReader.wrap(writerReader, new ShardId("test", 0));

    // Net cached bytes plus per-callback counters, updated by the listener.
    final AtomicLong ramAccounted = new AtomicLong();
    final AtomicInteger cacheCallCount = new AtomicInteger();
    final AtomicInteger removeCallCount = new AtomicInteger();

    final BitsetFilterCache cache = new BitsetFilterCache(new Index("test"), Settings.EMPTY);
    cache.setListener(new BitsetFilterCache.Listener() {
        @Override
        public void onCache(ShardId shardId, Accountable accountable) {
            cacheCallCount.incrementAndGet();
            ramAccounted.addAndGet(accountable.ramBytesUsed());
            if (writerReader == reader) {
                // Unwrapped reader carries no shard association.
                assertNull(shardId);
            } else {
                assertNotNull(shardId);
                assertEquals("test", shardId.index().name());
                assertEquals(0, shardId.id());
            }
        }

        @Override
        public void onRemoval(ShardId shardId, Accountable accountable) {
            removeCallCount.incrementAndGet();
            ramAccounted.addAndGet(-accountable.ramBytesUsed());
            if (writerReader == reader) {
                assertNull(shardId);
            } else {
                assertNotNull(shardId);
                assertEquals("test", shardId.index().name());
                assertEquals(0, shardId.id());
            }
        }
    });
    final BitSetProducer filter = cache.getBitSetProducer(new TermQuery(new Term("field", "value")));
    assertThat(matchCount(filter, reader), equalTo(1));
    // Loading the bitset must have fired exactly one onCache with a positive size.
    assertTrue(ramAccounted.get() > 0);
    assertEquals(1, cacheCallCount.get());
    assertEquals(0, removeCallCount.get());
    // Closing the reader evicts the cached bitset and zeroes the accounting.
    IOUtils.close(reader, indexWriter);
    assertEquals(1, removeCallCount.get());
    assertEquals(0, ramAccounted.get());
}

From source file: org.elasticsearch.index.cache.request.ShardRequestCache.java

License: Apache License

/** Adds the combined key+value footprint of a newly cached entry to the total metric. */
public void onCached(Accountable key, Accountable value) {
    final long added = key.ramBytesUsed() + value.ramBytesUsed();
    totalMetric.inc(added);
}

From source file: org.elasticsearch.index.cache.request.ShardRequestCache.java

License: Apache License

/**
 * Releases a removed entry's footprint from the total metric, bumping the
 * eviction counter when the removal was an eviction. Either side of the
 * entry may be null on removal; only the present parts are subtracted.
 */
public void onRemoval(Accountable key, Accountable value, boolean evicted) {
    if (evicted) {
        evictionsMetric.inc();
    }
    long released = (key == null) ? 0 : key.ramBytesUsed();
    released += (value == null) ? 0 : value.ramBytesUsed();
    totalMetric.dec(released);
}

From source file: org.elasticsearch.index.engine.Segment.java

License: Apache License

/**
 * Serializes an {@link Accountable} RAM-usage tree depth-first: the node's
 * description and size, then a child count followed by each child subtree.
 */
void writeRamTree(StreamOutput out, Accountable tree) throws IOException {
    out.writeString(tree.toString());
    out.writeVLong(tree.ramBytesUsed());
    final Collection<Accountable> childResources = tree.getChildResources();
    out.writeVInt(childResources.size());
    for (final Accountable childResource : childResources) {
        writeRamTree(out, childResource);
    }
}

From source file: org.elasticsearch.index.fielddata.ShardFieldData.java

License: Apache License

/**
 * Records newly cached field-data RAM usage, both in the shard-wide total
 * and in the per-field counter keyed by the field's index name.
 */
@Override
public void onCache(ShardId shardId, MappedFieldType.Names fieldNames, FieldDataType fieldDataType,
        Accountable ramUsage) {
    // Query the size once and reuse it for both metrics.
    final long bytes = ramUsage.ramBytesUsed();
    totalMetric.inc(bytes);
    final String keyFieldName = fieldNames.indexName();
    // computeIfAbsent replaces the original get / new / putIfAbsent race
    // dance (which created a throwaway CounterMetric and re-incremented the
    // winner on a lost race) with a single atomic map operation reaching the
    // same end state.
    perFieldTotals.computeIfAbsent(keyFieldName, k -> new CounterMetric()).inc(bytes);
}