Example usage for org.apache.lucene.util MapOfSets put

List of usage examples for org.apache.lucene.util MapOfSets put

Introduction

On this page you can find an example usage of org.apache.lucene.util MapOfSets put.

Prototype

public int put(K key, V val) 

Source Link

Document

Adds val to the Set associated with key in the Map.

Usage

From source file:org.apache.solr.uninverting.FieldCacheSanityChecker.java

License:Apache License

/**
 * Tests a CacheEntry[] for indication of "insane" cache usage.
 * <p>/*from ww w. j a  v a 2s .  co m*/
 * <B>NOTE:</b>FieldCache CreationPlaceholder objects are ignored.
 * (:TODO: is this a bad idea? are we masking a real problem?)
 * </p>
 */
public Insanity[] check(CacheEntry... cacheEntries) {
    if (null == cacheEntries || 0 == cacheEntries.length)
        return new Insanity[0];

    // the indirect mapping lets MapOfSet dedup identical valIds for us
    //
    // maps the (valId) identityhashCode of cache values to 
    // sets of CacheEntry instances
    final MapOfSets<Integer, CacheEntry> valIdToItems = new MapOfSets<>(
            new HashMap<Integer, Set<CacheEntry>>(17));
    // maps ReaderField keys to Sets of ValueIds
    final MapOfSets<ReaderField, Integer> readerFieldToValIds = new MapOfSets<>(
            new HashMap<ReaderField, Set<Integer>>(17));
    //

    // any keys that we know result in more then one valId
    final Set<ReaderField> valMismatchKeys = new HashSet<>();

    // iterate over all the cacheEntries to get the mappings we'll need
    for (int i = 0; i < cacheEntries.length; i++) {
        final CacheEntry item = cacheEntries[i];
        final Accountable val = item.getValue();

        // It's OK to have dup entries, where one is eg
        // float[] and the other is the Bits (from
        // getDocWithField())
        if (val instanceof FieldCacheImpl.BitsEntry) {
            continue;
        }

        if (val instanceof FieldCache.CreationPlaceholder)
            continue;

        final ReaderField rf = new ReaderField(item.getReaderKey(), item.getFieldName());

        final Integer valId = Integer.valueOf(System.identityHashCode(val));

        // indirect mapping, so the MapOfSet will dedup identical valIds for us
        valIdToItems.put(valId, item);
        if (1 < readerFieldToValIds.put(rf, valId)) {
            valMismatchKeys.add(rf);
        }
    }

    final List<Insanity> insanity = new ArrayList<>(valMismatchKeys.size() * 3);

    insanity.addAll(checkValueMismatch(valIdToItems, readerFieldToValIds, valMismatchKeys));
    insanity.addAll(checkSubreaders(valIdToItems, readerFieldToValIds));

    return insanity.toArray(new Insanity[insanity.size()]);
}

From source file:org.apache.solr.uninverting.FieldCacheSanityChecker.java

License:Apache License

/**
 * Internal helper method used by check that iterates over
 * the keys of readerFieldToValIds and generates a Collection
 * of Insanity instances whenever two (or more) ReaderField instances are
 * found that have an ancestry relationship.
 *
 * @param valIdToItems maps value identityHashCodes to the CacheEntry instances sharing that value
 * @param readerFieldToValIds maps ReaderField keys to the value ids observed for them
 * @return SUBREADER Insanity instances found (possibly empty, never null)
 * @see InsanityType#SUBREADER
 */
private Collection<Insanity> checkSubreaders(MapOfSets<Integer, CacheEntry> valIdToItems,
        MapOfSets<ReaderField, Integer> readerFieldToValIds) {

    final List<Insanity> insanity = new ArrayList<>(23);

    // parent ReaderField -> descendant ReaderFields that also have cache entries
    Map<ReaderField, Set<ReaderField>> badChildren = new HashMap<>(17);
    // live wrapper over badChildren: put/putAll below mutate badChildren directly
    MapOfSets<ReaderField, ReaderField> badKids = new MapOfSets<>(badChildren); // wrapper

    Map<Integer, Set<CacheEntry>> viToItemSets = valIdToItems.getMap();
    Map<ReaderField, Set<Integer>> rfToValIdSets = readerFieldToValIds.getMap();

    // ReaderFields already accounted for, so each is processed at most once
    Set<ReaderField> seen = new HashSet<>(17);

    Set<ReaderField> readerFields = rfToValIdSets.keySet();
    for (final ReaderField rf : readerFields) {

        if (seen.contains(rf))
            continue;

        List<Object> kids = getAllDescendantReaderKeys(rf.readerKey);
        for (Object kidKey : kids) {
            // a descendant reader paired with the same field name
            ReaderField kid = new ReaderField(kidKey, rf.fieldName);

            if (badChildren.containsKey(kid)) {
                // we've already processed this kid as an RF and found other problems;
                // fold those problems into our own and drop the kid's entry, so each
                // bad descendant is reported under exactly one (topmost) ancestor
                badKids.put(rf, kid);
                badKids.putAll(rf, badChildren.get(kid));
                badChildren.remove(kid);

            } else if (rfToValIdSets.containsKey(kid)) {
                // we have cache entries for the kid
                badKids.put(rf, kid);
            }
            seen.add(kid);
        }
        seen.add(rf);
    }

    // every mapping in badKids represents an Insanity
    for (final ReaderField parent : badChildren.keySet()) {
        Set<ReaderField> kids = badChildren.get(parent);

        List<CacheEntry> badEntries = new ArrayList<>(kids.size() * 2);

        // put parent entr(ies) in first
        {
            for (final Integer value : rfToValIdSets.get(parent)) {
                badEntries.addAll(viToItemSets.get(value));
            }
        }

        // now the entries for the descendants
        for (final ReaderField kid : kids) {
            for (final Integer value : rfToValIdSets.get(kid)) {
                badEntries.addAll(viToItemSets.get(value));
            }
        }

        CacheEntry[] badness = new CacheEntry[badEntries.size()];
        badness = badEntries.toArray(badness);

        insanity.add(new Insanity(InsanityType.SUBREADER,
                "Found caches for descendants of " + parent.toString(), badness));
    }

    return insanity;

}