Example usage for org.apache.lucene.util RamUsageEstimator sizeOf

List of usage examples for org.apache.lucene.util RamUsageEstimator sizeOf

Introduction

In this page you can find the example usage for org.apache.lucene.util RamUsageEstimator sizeOf.

Prototype

public static long sizeOf(Accountable[] accountables) 

Source Link

Document

Return the size of the provided array of Accountables by summing up the shallow size of the array and the Accountable#ramBytesUsed() memory usage reported by each Accountable.

Usage

From source file:com.lucure.core.codec.CompressingStoredFieldsIndexReader.java

License:Apache License

@Override
public long ramBytesUsed() {
    // Base shallow size of this reader plus the deep size of every
    // packed structure and array it owns.
    long bytes = BASE_RAM_BYTES_USED;

    // docBasesDeltas: shallow array size plus each packed reader's own footprint
    bytes += RamUsageEstimator.shallowSizeOf(docBasesDeltas);
    for (PackedInts.Reader reader : docBasesDeltas) {
        bytes += reader.ramBytesUsed();
    }

    // startPointersDeltas: accounted the same way
    bytes += RamUsageEstimator.shallowSizeOf(startPointersDeltas);
    for (PackedInts.Reader reader : startPointersDeltas) {
        bytes += reader.ramBytesUsed();
    }

    // Primitive arrays are measured deeply in one call each.
    bytes += RamUsageEstimator.sizeOf(docBases)
            + RamUsageEstimator.sizeOf(startPointers)
            + RamUsageEstimator.sizeOf(avgChunkDocs)
            + RamUsageEstimator.sizeOf(avgChunkSizes);

    return bytes;
}

From source file:com.o19s.es.ltr.ranker.dectree.NaiveAdditiveDecisionTree.java

License:Apache License

/**
 * Return the memory usage of this object in bytes. Negative values are illegal.
 */
@Override
public long ramBytesUsed() {
    // Fixed base cost plus the deep size of the weight and tree arrays.
    long total = BASE_RAM_USED;
    total += RamUsageEstimator.sizeOf(weights);
    total += RamUsageEstimator.sizeOf(trees);
    return total;
}

From source file:com.o19s.es.ltr.ranker.linear.LinearRanker.java

License:Apache License

/**
 * Return the memory usage of this object in bytes. Negative values are illegal.
 */
@Override
public long ramBytesUsed() {
    // One object header for this ranker plus the deep size of its weights.
    long header = RamUsageEstimator.NUM_BYTES_OBJECT_HEADER;
    return header + RamUsageEstimator.sizeOf(weights);
}

From source file:monad.face.internal.MonadSparseFixedBitSet.java

License:Apache License

/**
 * Inserts a new non-zero long (with only bit {@code i} set) into the sparse
 * storage of 4096-bit block {@code i4096}, growing the backing array if needed.
 *
 * @param i4096 index of the 4096-bit block being updated
 * @param i64   which 64-bit word within the block the new bit belongs to
 * @param i     bit position within that word (shift is mod 64)
 * @param index presumably the value of indices[i4096] BEFORE the update
 *              below — verify against caller
 */
private void insertLong(int i4096, int i64, int i, long index) {
    indices[i4096] |= 1L << i64; // shifts are mod 64 in java
    // we count the number of bits that are set on the right of i64
    // this gives us the index at which to perform the insertion
    final int o = Long.bitCount(index & ((1L << i64) - 1));
    final long[] bitArray = bits[i4096];
    if (bitArray[bitArray.length - 1] == 0) {
        // since we only store non-zero longs, if the last value is 0, it means
        // that we already have extra space, make use of it
        System.arraycopy(bitArray, o, bitArray, o + 1, bitArray.length - o - 1);
        bitArray[o] = 1L << i;
    } else {
        // we don't have extra space so we need to resize to insert the new long
        final int newSize = oversize(bitArray.length + 1);
        final long[] newBitArray = new long[newSize];
        System.arraycopy(bitArray, 0, newBitArray, 0, o);
        newBitArray[o] = 1L << i;
        System.arraycopy(bitArray, o, newBitArray, o + 1, bitArray.length - o);
        bits[i4096] = newBitArray;
        // account for the growth of the backing array
        ramBytesUsed += RamUsageEstimator.sizeOf(newBitArray) - RamUsageEstimator.sizeOf(bitArray);
    }
    ++nonZeroLongCount;
}

From source file:org.elasticsearch.index.cache.id.simple.SimpleIdReaderTypeCache.java

License:Apache License

/**
 * Estimates the memory footprint (in bytes) of this per-type id cache by
 * walking its hash set, doc-to-id array, parent-id values, and ordinals.
 */
long computeSizeInBytes() {
    long sizeInBytes = 0;
    // Ignore type field
    //  sizeInBytes += ((type.length() * RamUsage.NUM_BYTES_CHAR) + (3 * RamUsage.NUM_BYTES_INT)) + RamUsage.NUM_BYTES_OBJECT_HEADER;
    // idToDoc: array header plus one int per slot of its values array
    sizeInBytes += RamUsageEstimator.NUM_BYTES_ARRAY_HEADER
            + (idToDoc._valuesSize() * RamUsageEstimator.NUM_BYTES_INT);
    for (Object o : idToDoc._set) {
        if (o == TObjectHash.FREE || o == TObjectHash.REMOVED) {
            // free/removed slots only cost the reference itself
            sizeInBytes += RamUsageEstimator.NUM_BYTES_OBJECT_REF;
        } else {
            // occupied slot: object header plus the byte payload plus an int
            HashedBytesArray bytesArray = (HashedBytesArray) o;
            sizeInBytes += RamUsageEstimator.NUM_BYTES_OBJECT_HEADER
                    + (bytesArray.length() + RamUsageEstimator.NUM_BYTES_INT);
        }
    }

    // The docIdToId array contains references to idToDoc for this segment or other segments, so we can use OBJECT_REF
    sizeInBytes += RamUsageEstimator.NUM_BYTES_ARRAY_HEADER
            + (RamUsageEstimator.NUM_BYTES_OBJECT_REF * docIdToId.length);
    for (HashedBytesArray bytesArray : parentIdsValues) {
        if (bytesArray == null) {
            sizeInBytes += RamUsageEstimator.NUM_BYTES_OBJECT_REF;
        } else {
            sizeInBytes += RamUsageEstimator.NUM_BYTES_OBJECT_HEADER
                    + (bytesArray.length() + RamUsageEstimator.NUM_BYTES_INT);
        }
    }
    // deep size of the primitive ordinals array
    sizeInBytes += RamUsageEstimator.sizeOf(parentIdsOrdinals);

    return sizeInBytes;
}

From source file:org.elasticsearch.index.fielddata.plain.DoubleArrayIndexFieldData.java

License:Apache License

/**
 * Loads double-valued field data for the given segment. Builds ordinals from
 * the field's terms, then picks between an ordinals-backed representation and
 * a flat per-document array, whichever is estimated to use less memory.
 * The circuit-breaker estimator is notified of the final size on success.
 */
@Override
public DoubleArrayAtomicFieldData loadDirect(AtomicReaderContext context) throws Exception {

    AtomicReader reader = context.reader();
    Terms terms = reader.terms(getFieldNames().indexName());
    DoubleArrayAtomicFieldData data = null;
    // TODO: Use an actual estimator to estimate before loading.
    NonEstimatingEstimator estimator = new NonEstimatingEstimator(breakerService.getBreaker());
    if (terms == null) {
        // field has no terms in this segment: return an empty holder
        data = DoubleArrayAtomicFieldData.empty(reader.maxDoc());
        estimator.afterLoad(null, data.getMemorySizeInBytes());
        return data;
    }
    // TODO: how can we guess the number of terms? numerics end up creating more terms per value...
    final BigDoubleArrayList values = new BigDoubleArrayList();

    values.add(0); // first "t" indicates null value
    final float acceptableTransientOverheadRatio = fieldDataType.getSettings().getAsFloat(
            "acceptable_transient_overhead_ratio", OrdinalsBuilder.DEFAULT_ACCEPTABLE_OVERHEAD_RATIO);
    OrdinalsBuilder builder = new OrdinalsBuilder(reader.maxDoc(), acceptableTransientOverheadRatio);
    boolean success = false;
    try {
        // decode each prefix-coded numeric term into its double value
        final BytesRefIterator iter = builder
                .buildFromTerms(getNumericType().wrapTermsEnum(terms.iterator(null)));
        BytesRef term;
        while ((term = iter.next()) != null) {
            values.add(NumericUtils.sortableLongToDouble(NumericUtils.prefixCodedToLong(term)));
        }
        Ordinals build = builder.build(fieldDataType.getSettings());
        if (!build.isMultiValued() && CommonSettings.removeOrdsOnSingleValue(fieldDataType)) {
            Docs ordinals = build.ordinals();
            final FixedBitSet set = builder.buildDocsWithValuesSet();

            // there's a sweet spot where, due to low unique value count, using ordinals will consume less memory
            long singleValuesArraySize = reader.maxDoc() * RamUsageEstimator.NUM_BYTES_DOUBLE + (set == null ? 0
                    : RamUsageEstimator.sizeOf(set.getBits()) + RamUsageEstimator.NUM_BYTES_INT);
            long uniqueValuesArraySize = values.sizeInBytes();
            long ordinalsSize = build.getMemorySizeInBytes();
            if (uniqueValuesArraySize + ordinalsSize < singleValuesArraySize) {
                // ordinals-backed layout wins on memory: keep it
                data = new DoubleArrayAtomicFieldData.WithOrdinals(values, reader.maxDoc(), build);
                success = true;
                return data;
            }

            // otherwise materialize one value per document
            int maxDoc = reader.maxDoc();
            BigDoubleArrayList sValues = new BigDoubleArrayList(maxDoc);
            for (int i = 0; i < maxDoc; i++) {
                sValues.add(values.get(ordinals.getOrd(i)));
            }
            assert sValues.size() == maxDoc;
            if (set == null) {
                // every doc has a value: no docs-with-value bitset needed
                data = new DoubleArrayAtomicFieldData.Single(sValues, maxDoc, ordinals.getNumOrds());
            } else {
                data = new DoubleArrayAtomicFieldData.SingleFixedSet(sValues, maxDoc, set,
                        ordinals.getNumOrds());
            }
        } else {
            // multi-valued (or ordinal removal disabled): keep the ordinals layout
            data = new DoubleArrayAtomicFieldData.WithOrdinals(values, reader.maxDoc(), build);
        }
        success = true;
        return data;
    } finally {
        // only report memory to the breaker when loading actually succeeded
        if (success) {
            estimator.afterLoad(null, data.getMemorySizeInBytes());
        }
        builder.close();
    }

}

From source file:org.elasticsearch.index.fielddata.plain.FloatArrayIndexFieldData.java

License:Apache License

/**
 * Loads float-valued field data for the given segment. Builds ordinals from
 * the field's terms, then picks between an ordinals-backed representation and
 * a flat per-document array, whichever is estimated to use less memory.
 * The circuit-breaker estimator is notified of the final size on success.
 */
@Override
public FloatArrayAtomicFieldData loadDirect(AtomicReaderContext context) throws Exception {
    AtomicReader reader = context.reader();
    Terms terms = reader.terms(getFieldNames().indexName());
    FloatArrayAtomicFieldData data = null;
    // TODO: Use an actual estimator to estimate before loading.
    NonEstimatingEstimator estimator = new NonEstimatingEstimator(breakerService.getBreaker());
    if (terms == null) {
        // field has no terms in this segment: return an empty holder
        data = FloatArrayAtomicFieldData.empty(reader.maxDoc());
        estimator.afterLoad(null, data.getMemorySizeInBytes());
        return data;
    }
    // TODO: how can we guess the number of terms? numerics end up creating more terms per value...
    final BigFloatArrayList values = new BigFloatArrayList();

    values.add(0); // first "t" indicates null value

    final float acceptableTransientOverheadRatio = fieldDataType.getSettings().getAsFloat(
            "acceptable_transient_overhead_ratio", OrdinalsBuilder.DEFAULT_ACCEPTABLE_OVERHEAD_RATIO);
    OrdinalsBuilder builder = new OrdinalsBuilder(reader.maxDoc(), acceptableTransientOverheadRatio);
    boolean success = false;
    try {
        // decode each prefix-coded numeric term into its float value
        BytesRefIterator iter = builder.buildFromTerms(getNumericType().wrapTermsEnum(terms.iterator(null)));
        BytesRef term;
        while ((term = iter.next()) != null) {
            values.add(NumericUtils.sortableIntToFloat(NumericUtils.prefixCodedToInt(term)));
        }
        Ordinals build = builder.build(fieldDataType.getSettings());
        if (!build.isMultiValued() && CommonSettings.removeOrdsOnSingleValue(fieldDataType)) {
            Docs ordinals = build.ordinals();
            final FixedBitSet set = builder.buildDocsWithValuesSet();

            // there's a sweet spot where, due to low unique value count, using ordinals will consume less memory
            long singleValuesArraySize = reader.maxDoc() * RamUsageEstimator.NUM_BYTES_FLOAT + (set == null ? 0
                    : RamUsageEstimator.sizeOf(set.getBits()) + RamUsageEstimator.NUM_BYTES_INT);
            long uniqueValuesArraySize = values.sizeInBytes();
            long ordinalsSize = build.getMemorySizeInBytes();
            if (uniqueValuesArraySize + ordinalsSize < singleValuesArraySize) {
                // ordinals-backed layout wins on memory: keep it
                data = new FloatArrayAtomicFieldData.WithOrdinals(values, reader.maxDoc(), build);
                success = true;
                return data;
            }

            // otherwise materialize one value per document
            int maxDoc = reader.maxDoc();
            BigFloatArrayList sValues = new BigFloatArrayList(maxDoc);
            for (int i = 0; i < maxDoc; i++) {
                sValues.add(values.get(ordinals.getOrd(i)));
            }
            assert sValues.size() == maxDoc;
            if (set == null) {
                // every doc has a value: no docs-with-value bitset needed
                data = new FloatArrayAtomicFieldData.Single(sValues, maxDoc, ordinals.getNumOrds());
            } else {
                data = new FloatArrayAtomicFieldData.SingleFixedSet(sValues, maxDoc, set,
                        ordinals.getNumOrds());
            }
        } else {
            // multi-valued (or ordinal removal disabled): keep the ordinals layout
            data = new FloatArrayAtomicFieldData.WithOrdinals(values, reader.maxDoc(), build);
        }
        success = true;
        return data;
    } finally {
        // only report memory to the breaker when loading actually succeeded
        if (success) {
            estimator.afterLoad(null, data.getMemorySizeInBytes());
        }
        builder.close();
    }

}