Usage examples for the `org.apache.lucene.util.packed.PackedInts.COMPACT` constant (a `float`). To view the full source code of an example, click its Source link.
From source file: com.lucure.core.codec.LucurePostingsWriter.java
License: Apache License
/**
 * Creates a postings writer that uses the most space-efficient packed-ints
 * layout, {@code PackedInts.COMPACT}, trading some decode speed for minimal
 * storage overhead.
 *
 * @param state segment write state supplied by the indexing chain
 * @throws IOException propagated from the two-argument constructor
 *     (presumably while opening segment output files — not visible here)
 */
public LucurePostingsWriter(SegmentWriteState state) throws IOException { this(state, PackedInts.COMPACT); }
From source file: org.elasticsearch.index.fielddata.ordinals.InternalGlobalOrdinalsBuilder.java
License: Apache License
/**
 * Builds index-wide ("global") ordinals for a multi-segment index by merging
 * the per-segment term ordinals of {@code indexFieldData}, and wraps the
 * result in an {@link IndexFieldData.WithOrdinals} implementation.
 *
 * <p>Three lookup structures are produced:
 * <ul>
 *   <li>{@code globalOrdToFirstSegment} — for each global ordinal, the index
 *       of the first segment that contains the term;</li>
 *   <li>{@code globalOrdToFirstSegmentDelta} — the delta between the global
 *       ordinal and that first segment's local ordinal;</li>
 *   <li>{@code segmentOrdToGlobalOrdLookups} — per-segment mappings from
 *       local ordinal to global ordinal, built via
 *       {@code OrdinalMappingSourceBuilder}.</li>
 * </ul>
 *
 * <p>The total RAM used by these structures is reported to the circuit
 * breaker (without tripping it) so field-data memory accounting stays
 * accurate.
 *
 * @param indexReader    top-level reader; must have more than one leaf
 *                       (single-segment indices don't need global ordinals)
 * @param indexFieldData per-segment field data whose ordinals are merged
 * @param settings       index settings (overhead ratio, mapping thresholds)
 * @param breakerService used to record the memory consumed by the result
 * @return field data exposing global ordinals across all segments
 * @throws IOException if reading terms from any segment fails
 */
@Override public IndexFieldData.WithOrdinals build(final IndexReader indexReader, IndexFieldData.WithOrdinals indexFieldData, Settings settings, CircuitBreakerService breakerService) throws IOException {
    assert indexReader.leaves().size() > 1;
    long startTime = System.currentTimeMillis();
    // It makes sense to make the overhead ratio configurable for the mapping from segment ords to global ords.
    // However, other mappings are never the bottleneck and only used to get the original value from an ord, so
    // it makes sense to force COMPACT for them. Note the *default* here is FAST (favoring speed); only the
    // segmentOrd->globalOrd mapping honors this configurable ratio.
    final float acceptableOverheadRatio = settings.getAsFloat("acceptable_overhead_ratio", PackedInts.FAST);
    // Auxiliary lookups are always COMPACT (see comment above): they are read rarely.
    final AppendingPackedLongBuffer globalOrdToFirstSegment = new AppendingPackedLongBuffer(PackedInts.COMPACT);
    final MonotonicAppendingLongBuffer globalOrdToFirstSegmentDelta = new MonotonicAppendingLongBuffer(
            PackedInts.COMPACT);
    FieldDataType fieldDataType = indexFieldData.getFieldDataType();
    // Per-field threshold overrides the index-level default; presumably controls when the
    // mapping builder switches representation — confirm in OrdinalMappingSourceBuilder.
    int defaultThreshold = settings.getAsInt(ORDINAL_MAPPING_THRESHOLD_INDEX_SETTING_KEY,
            ORDINAL_MAPPING_THRESHOLD_DEFAULT);
    int threshold = fieldDataType.getSettings().getAsInt(ORDINAL_MAPPING_THRESHOLD_KEY, defaultThreshold);
    OrdinalMappingSourceBuilder ordinalMappingBuilder = new OrdinalMappingSourceBuilder(
            indexReader.leaves().size(), acceptableOverheadRatio, threshold);
    long currentGlobalOrdinal = 0;
    final AtomicFieldData.WithOrdinals[] withOrdinals = new AtomicFieldData.WithOrdinals[indexReader.leaves()
            .size()];
    // TermIterator walks all segments' terms in merged (sorted) order, so each distinct
    // term is visited exactly once and assigned the next global ordinal.
    TermIterator termIterator = new TermIterator(indexFieldData, indexReader.leaves(), withOrdinals);
    for (BytesRef term = termIterator.next(); term != null; term = termIterator.next()) {
        globalOrdToFirstSegment.add(termIterator.firstReaderIndex());
        // Store the delta rather than the absolute ordinal: deltas are monotonic and
        // compress better in the MonotonicAppendingLongBuffer.
        long globalOrdinalDelta = currentGlobalOrdinal - termIterator.firstLocalOrdinal();
        globalOrdToFirstSegmentDelta.add(globalOrdinalDelta);
        // Every segment that contains this term records localOrd -> globalOrd.
        for (TermIterator.LeafSource leafSource : termIterator.competitiveLeafs()) {
            ordinalMappingBuilder.onOrdinal(leafSource.context.ord, leafSource.tenum.ord(),
                    currentGlobalOrdinal);
        }
        currentGlobalOrdinal++;
    }
    // ram used for the globalOrd to segmentOrd and segmentOrd to firstReaderIndex lookups
    long memorySizeInBytesCounter = 0;
    // freeze() makes the buffers immutable; ramBytesUsed() is only meaningful afterwards.
    globalOrdToFirstSegment.freeze();
    memorySizeInBytesCounter += globalOrdToFirstSegment.ramBytesUsed();
    globalOrdToFirstSegmentDelta.freeze();
    memorySizeInBytesCounter += globalOrdToFirstSegmentDelta.ramBytesUsed();
    final long maxOrd = currentGlobalOrdinal;
    OrdinalMappingSource[] segmentOrdToGlobalOrdLookups = ordinalMappingBuilder.build(maxOrd);
    // add ram used for the main segmentOrd to globalOrd lookups
    memorySizeInBytesCounter += ordinalMappingBuilder.getMemorySizeInBytes();
    final long memorySizeInBytes = memorySizeInBytesCounter;
    // Account the memory with the breaker but never trip it: the data is already built.
    breakerService.getBreaker().addWithoutBreaking(memorySizeInBytes);
    if (logger.isDebugEnabled()) {
        // this does include the [] from the array in the impl name
        String implName = segmentOrdToGlobalOrdLookups.getClass().getSimpleName();
        logger.debug("Global-ordinals[{}][{}][{}] took {} ms", implName,
                indexFieldData.getFieldNames().fullName(), maxOrd, (System.currentTimeMillis() - startTime));
    }
    return new InternalGlobalOrdinalsIndexFieldData(indexFieldData.index(), settings,
            indexFieldData.getFieldNames(), fieldDataType, withOrdinals, globalOrdToFirstSegment,
            globalOrdToFirstSegmentDelta, segmentOrdToGlobalOrdLookups, memorySizeInBytes);
}
From source file: org.elasticsearch.index.fielddata.plain.GeoPointCompressedIndexFieldData.java
License: Apache License
/**
 * Loads the geo-point field data for a single segment into compressed
 * packed-ints arrays (one {@code PagedMutable} each for latitude and
 * longitude, at {@code encoding.numBitsPerCoordinate()} bits per value,
 * using {@code PackedInts.COMPACT} storage).
 *
 * <p>Flow: iterate the field's terms (each decoding to a {@code GeoPoint}),
 * store the encoded lat/lon per term ordinal (ordinal 0 is reserved — values
 * start at ordinal 1), then build ordinals. If the field turns out to be
 * single-valued and the settings allow dropping ordinals, the per-ordinal
 * arrays are re-materialized as direct per-document arrays; otherwise the
 * ordinal-based representation is returned. Memory used is reported to the
 * circuit breaker after a successful load.
 *
 * @param context the segment (atomic reader) to load
 * @return per-segment geo-point field data; an {@code Empty} instance when
 *         the field has no terms in this segment
 * @throws Exception on any failure while reading terms or building ordinals
 */
@Override public AtomicGeoPointFieldData<ScriptDocValues> loadDirect(AtomicReaderContext context) throws Exception {
    AtomicReader reader = context.reader();
    Terms terms = reader.terms(getFieldNames().indexName());
    AtomicGeoPointFieldData data = null;
    // TODO: Use an actual estimator to estimate before loading.
    NonEstimatingEstimator estimator = new NonEstimatingEstimator(breakerService.getBreaker());
    if (terms == null) {
        // Field absent from this segment: return an empty placeholder and still
        // report its (tiny) footprint to the breaker.
        data = new Empty(reader.maxDoc());
        estimator.afterLoad(null, data.getMemorySizeInBytes());
        return data;
    }
    final long initialSize;
    if (terms.size() >= 0) {
        // +1 because ordinal 0 is unused (values are written starting at ordinal 1 below).
        initialSize = 1 + terms.size();
    } else { // codec doesn't expose size
        initialSize = 1 + Math.min(1 << 12, reader.maxDoc());
    }
    // Page size chosen so one page of packed coordinates fits in BigArrays.PAGE_SIZE_IN_BYTES,
    // rounded down to a power of two (highestOneBit of value-1, then doubled).
    final int pageSize = Integer
            .highestOneBit(BigArrays.PAGE_SIZE_IN_BYTES * 8 / encoding.numBitsPerCoordinate() - 1) << 1;
    PagedMutable lat = new PagedMutable(initialSize, pageSize, encoding.numBitsPerCoordinate(),
            PackedInts.COMPACT);
    PagedMutable lon = new PagedMutable(initialSize, pageSize, encoding.numBitsPerCoordinate(),
            PackedInts.COMPACT);
    // Transient (build-time) ordinals may use a looser space/speed trade-off than the final data.
    final float acceptableTransientOverheadRatio = fieldDataType.getSettings().getAsFloat(
            "acceptable_transient_overhead_ratio", OrdinalsBuilder.DEFAULT_ACCEPTABLE_OVERHEAD_RATIO);
    OrdinalsBuilder builder = new OrdinalsBuilder(terms.size(), reader.maxDoc(),
            acceptableTransientOverheadRatio);
    boolean success = false;
    try {
        final GeoPointEnum iter = new GeoPointEnum(builder.buildFromTerms(terms.iterator(null)));
        GeoPoint point;
        long ord = 0;
        while ((point = iter.next()) != null) {
            // Pre-increment: first value lands at ordinal 1, keeping ordinal 0 as "no value".
            ++ord;
            if (lat.size() <= ord) {
                // Grow both arrays in lock-step with amortized over-allocation.
                final long newSize = BigArrays.overSize(ord + 1);
                lat = lat.resize(newSize);
                lon = lon.resize(newSize);
            }
            lat.set(ord, encoding.encodeCoordinate(point.getLat()));
            lon.set(ord, encoding.encodeCoordinate(point.getLon()));
        }
        Ordinals build = builder.build(fieldDataType.getSettings());
        if (!build.isMultiValued() && CommonSettings.removeOrdsOnSingleValue(fieldDataType)) {
            // Single-valued field: drop the ordinal indirection by copying values into
            // per-document arrays (index = docId instead of ordinal).
            Docs ordinals = build.ordinals();
            int maxDoc = reader.maxDoc();
            PagedMutable sLat = new PagedMutable(reader.maxDoc(), pageSize, encoding.numBitsPerCoordinate(),
                    PackedInts.COMPACT);
            PagedMutable sLon = new PagedMutable(reader.maxDoc(), pageSize, encoding.numBitsPerCoordinate(),
                    PackedInts.COMPACT);
            for (int i = 0; i < maxDoc; i++) {
                final long nativeOrdinal = ordinals.getOrd(i);
                sLat.set(i, lat.get(nativeOrdinal));
                sLon.set(i, lon.get(nativeOrdinal));
            }
            // A null set means every document has a value, so no docs-with-value bitset is needed.
            FixedBitSet set = builder.buildDocsWithValuesSet();
            if (set == null) {
                data = new GeoPointCompressedAtomicFieldData.Single(encoding, sLon, sLat, reader.maxDoc(),
                        ordinals.getNumOrds());
            } else {
                data = new GeoPointCompressedAtomicFieldData.SingleFixedSet(encoding, sLon, sLat,
                        reader.maxDoc(), set, ordinals.getNumOrds());
            }
        } else {
            // Keep the ordinal-based representation; trim the over-allocated arrays to exact size.
            if (lat.size() != build.getMaxOrd()) {
                lat = lat.resize(build.getMaxOrd());
                lon = lon.resize(build.getMaxOrd());
            }
            data = new GeoPointCompressedAtomicFieldData.WithOrdinals(encoding, lon, lat, reader.maxDoc(),
                    build);
        }
        success = true;
        return data;
    } finally {
        // Report memory only on success (data is non-null then); always release the builder.
        if (success) {
            estimator.afterLoad(null, data.getMemorySizeInBytes());
        }
        builder.close();
    }
}