List of usage examples for org.apache.solr.search SolrIndexSearcher cacheInsert
public Object cacheInsert(String cacheName, Object key, Object val)
From source file:org.alfresco.solr.AlfrescoSolrEventListener.java
License:Open Source License
/**
 * Rebuilds the Alfresco per-searcher caches when a new searcher is opened.
 *
 * Builds doc-id indexed lookup structures (DBID/leaf/path entries, ACL ids, TX ids,
 * ACL-TX ids, the set of all leaf docs), either incrementally from the previous
 * searcher's caches plus the add/update/delete tracking maps, or from scratch via
 * buildCacheForReader. Derived structures (ACL-ordered and owner-ordered lists,
 * ACL/owner lookup maps, the public doc set) are then rebuilt and inserted into the
 * new searcher's caches before warming.
 *
 * NOTE(review): heavily order-dependent; code left byte-identical, comments only.
 */
@Override
public void newSearcher(SolrIndexSearcher newSearcher, SolrIndexSearcher currentSearcher) {
    Properties p = core.getResourceLoader().getCoreProperties();
    boolean doPermissionChecks = Boolean.parseBoolean(p.getProperty("alfresco.doPermissionChecks", "true"));
    SolrIndexReader newReader = newSearcher.getReader();
    log.info("Max " + newReader.maxDoc());
    log.info("Docs " + newReader.numDocs());
    log.info("Deleted " + newReader.numDeletedDocs());
    long startTime = System.nanoTime();
    // Primary per-doc cache, resized to the new reader; -1 marks "no id" slots below.
    ResizeableArrayList<CacheEntry> indexedByDocId = (ResizeableArrayList<CacheEntry>) newSearcher
            .cacheLookup(ALFRESCO_ARRAYLIST_CACHE, KEY_DBID_LEAF_PATH_BY_DOC_ID);
    indexedByDocId.resize(newReader.maxDoc());
    HashSet<String> globalReaders = new HashSet<String>();
    OpenBitSet allLeafDocs = new OpenBitSet(newReader.maxDoc());
    long[] aclIdByDocId = new long[newReader.maxDoc()];
    long[] txByDocId = new long[newReader.maxDoc()];
    long[] aclTxByDocId = new long[newReader.maxDoc()];
    for (int i = 0; i < aclIdByDocId.length; i++) {
        aclIdByDocId[i] = -1;
        txByDocId[i] = -1;
        aclTxByDocId[i] = -1;
    }
    OpenBitSet deleted = new OpenBitSet(newReader.maxDoc());
    OwnerIdManager ownerIdManager = new OwnerIdManager();
    HashMap<Long, CacheEntry> unmatchedByDBID = new HashMap<Long, CacheEntry>();
    if ((incrementalCacheRebuild) && currentSearcher != null) {
        // Incremental path: reuse the previous searcher's caches plus the
        // tracking maps written by AlfrescoUpdateHandler.commit().
        ResizeableArrayList<CacheEntry> oldIndexedByDocId = (ResizeableArrayList<CacheEntry>) currentSearcher
                .cacheLookup(ALFRESCO_ARRAYLIST_CACHE, KEY_DBID_LEAF_PATH_BY_DOC_ID);
        long[] oldAclIdByDocId = (long[]) currentSearcher.cacheLookup(ALFRESCO_CACHE, KEY_ACL_ID_BY_DOC_ID);
        long[] oldTxIdByDocId = (long[]) currentSearcher.cacheLookup(ALFRESCO_CACHE, KEY_TX_ID_BY_DOC_ID);
        long[] oldAclTxIdByDocId = (long[]) currentSearcher.cacheLookup(ALFRESCO_CACHE, KEY_ACL_TX_ID_BY_DOC_ID);
        OpenBitSet oldAllLeafDocs = (OpenBitSet) currentSearcher.cacheLookup(ALFRESCO_CACHE, KEY_ALL_LEAF_DOCS);
        OwnerIdManager oldOwnerIdManager = (OwnerIdManager) currentSearcher.cacheLookup(ALFRESCO_CACHE,
                KEY_OWNER_ID_MANAGER);
        ownerIdManager.addAll(oldOwnerIdManager);
        ConcurrentHashMap<Long, Long> addedLeaves = (ConcurrentHashMap<Long, Long>) currentSearcher
                .cacheLookup(ALFRESCO_CACHE, KEY_ADDED_LEAVES);
        ConcurrentHashMap<Long, Long> addedAux = (ConcurrentHashMap<Long, Long>) currentSearcher
                .cacheLookup(ALFRESCO_CACHE, KEY_ADDED_AUX);
        ConcurrentHashMap<Long, Long> addedAcl = (ConcurrentHashMap<Long, Long>) currentSearcher
                .cacheLookup(ALFRESCO_CACHE, KEY_ADDED_ACL);
        ConcurrentHashMap<Long, Long> addedTx = (ConcurrentHashMap<Long, Long>) currentSearcher
                .cacheLookup(ALFRESCO_CACHE, KEY_ADDED_TX);
        ConcurrentHashMap<Long, Long> addedAclTx = (ConcurrentHashMap<Long, Long>) currentSearcher
                .cacheLookup(ALFRESCO_CACHE, KEY_ADDED_ACL_TX);
        ConcurrentHashMap<Long, Long> updatedLeaves = (ConcurrentHashMap<Long, Long>) currentSearcher
                .cacheLookup(ALFRESCO_CACHE, KEY_UPDATED_LEAVES);
        ConcurrentHashMap<Long, Long> updatedAux = (ConcurrentHashMap<Long, Long>) currentSearcher
                .cacheLookup(ALFRESCO_CACHE, KEY_UPDATED_AUX);
        ConcurrentHashMap<Long, Long> updatedAcl = (ConcurrentHashMap<Long, Long>) currentSearcher
                .cacheLookup(ALFRESCO_CACHE, KEY_UPDATED_ACL);
        ConcurrentHashMap<Long, Long> updatedTx = (ConcurrentHashMap<Long, Long>) currentSearcher
                .cacheLookup(ALFRESCO_CACHE, KEY_UPDATED_TX);
        ConcurrentHashMap<Long, Long> updatedAclTx = (ConcurrentHashMap<Long, Long>) currentSearcher
                .cacheLookup(ALFRESCO_CACHE, KEY_UPDATED_ACL_TX);
        ConcurrentHashMap<Long, Long> deletedLeaves = (ConcurrentHashMap<Long, Long>) currentSearcher
                .cacheLookup(ALFRESCO_CACHE, KEY_DELETED_LEAVES);
        ConcurrentHashMap<Long, Long> deletedAux = (ConcurrentHashMap<Long, Long>) currentSearcher
                .cacheLookup(ALFRESCO_CACHE, KEY_DELETED_AUX);
        ConcurrentHashMap<Long, Long> deletedAcl = (ConcurrentHashMap<Long, Long>) currentSearcher
                .cacheLookup(ALFRESCO_CACHE, KEY_DELETED_ACL);
        ConcurrentHashMap<Long, Long> deletedTx = (ConcurrentHashMap<Long, Long>) currentSearcher
                .cacheLookup(ALFRESCO_CACHE, KEY_DELETED_TX);
        ConcurrentHashMap<Long, Long> deletedAclTx = (ConcurrentHashMap<Long, Long>) currentSearcher
                .cacheLookup(ALFRESCO_CACHE, KEY_DELETED_ACL_TX);
        AtomicBoolean deleteAll = (AtomicBoolean) currentSearcher.cacheLookup(ALFRESCO_CACHE, KEY_DELETE_ALL);
        AtomicBoolean checkCache = (AtomicBoolean) currentSearcher.cacheLookup(ALFRESCO_CACHE, KEY_CHECK_CACHE);
        if (checkCache == null) {
            checkCache = new AtomicBoolean(false);
        }
        // Anything added or updated since the last commit?
        boolean hasNew = (addedLeaves.size() + addedAux.size() + addedAcl.size() + addedTx.size()
                + addedAclTx.size() + updatedLeaves.size() + updatedAux.size() + updatedAcl.size()
                + updatedTx.size() + updatedAclTx.size()) > 0;
        if (newReader.maxDoc() == 0) {
            // nothing to do
        } else if ((oldIndexedByDocId == null) || (oldAclIdByDocId == null) || (oldTxIdByDocId == null)
                || (oldAclTxIdByDocId == null) || (oldAllLeafDocs == null) || (oldOwnerIdManager == null)) {
            // Any missing prior cache forces a full rebuild.
            log.warn("Recover from missing cache");
            buildCacheForReader(indexedByDocId, allLeafDocs, aclIdByDocId, txByDocId, aclTxByDocId, newReader, 0,
                    newReader.maxDoc(), unmatchedByDBID, ownerIdManager);
        } else if (deleteAll.get()) {
            buildCacheForReader(indexedByDocId, allLeafDocs, aclIdByDocId, txByDocId, aclTxByDocId, newReader, 0,
                    newReader.maxDoc(), unmatchedByDBID, ownerIdManager);
        } else {
            try {
                SolrIndexReader[] before = currentSearcher.getReader().getSequentialSubReaders();
                SolrIndexReader[] after = newSearcher.getReader().getSequentialSubReaders();
                CacheSection[] cacheSectionsBefore = SolrIndexReaderCacheSection.getCacheSections(before);
                CacheSection[] cacheSectionsAfter = SolrIndexReaderCacheSection.getCacheSections(after);
                // Copy old to new and apply deletions
                int currentCache = 0;
                for (int i = 0; i < oldAclIdByDocId.length; i++) {
                    // Advance to the cache section containing doc i.
                    CacheSection section = cacheSectionsBefore[currentCache];
                    if (section.getStart() + section.getLength() == i) {
                        currentCache++;
                        if (currentCache == cacheSectionsBefore.length) {
                            currentCache--;
                        }
                        section = cacheSectionsBefore[currentCache];
                    }
                    CacheEntry entry = oldIndexedByDocId.get(i);
                    if (entry != null) {
                        if (entry.getLeaf() == i) {
                            // Leaf doc: drop it if its DBID was updated or deleted.
                            if ((updatedLeaves.get(entry.dbid) == null) && (deletedLeaves.get(entry.dbid) == null)) {
                                // leave
                            } else {
                                section.addDeletion(i);
                                deleted.set(i);
                            }
                        } else if (entry.getPath() == i) {
                            // Aux (path) doc: same rule against the aux tracking maps.
                            if ((updatedAux.get(entry.dbid) == null) && (deletedAux.get(entry.dbid) == null)) {
                                // leave
                            } else {
                                section.addDeletion(i);
                                deleted.set(i);
                            }
                        }
                    } else {
                        // Not a leaf/aux entry: check ACL, TX and ACL-TX tracking by id.
                        if ((updatedAcl.get(oldAclIdByDocId[i]) != null)
                                || (deletedAcl.get(oldAclIdByDocId[i]) != null)) {
                            section.addDeletion(i);
                            deleted.set(i);
                        }
                        if ((updatedTx.get(oldTxIdByDocId[i]) != null)
                                || (deletedTx.get(oldTxIdByDocId[i]) != null)) {
                            section.addDeletion(i);
                            deleted.set(i);
                        }
                        if ((updatedAclTx.get(oldAclTxIdByDocId[i]) != null)
                                || (deletedAclTx.get(oldAclTxIdByDocId[i]) != null)) {
                            section.addDeletion(i);
                            deleted.set(i);
                        }
                    }
                }
                // Plan and apply the section-by-section cache update operations.
                LinkedList<CacheMatch> operations = buildCacheUpdateOperations(hasNew, cacheSectionsBefore,
                        cacheSectionsAfter, after);
                log.info("Cache operatoins ...");
                for (CacheMatch match : operations) {
                    log.info(match.toString());
                }
                CacheUpdateTracker tracker = new CacheUpdateTracker(0, 0, 0);
                for (CacheMatch match : operations) {
                    match.updateCache(tracker, oldIndexedByDocId, oldAclIdByDocId, oldTxIdByDocId,
                            oldAclTxIdByDocId, indexedByDocId, allLeafDocs, aclIdByDocId, txByDocId, aclTxByDocId,
                            unmatchedByDBID, deleted, newReader, ownerIdManager);
                }
                // Check unmatched
                int hiddenDocCount = 0;
                for (Long unmatchedDBID : unmatchedByDBID.keySet()) {
                    // hidden docs appear as an unmatched path
                    CacheEntry entry = unmatchedByDBID.get(unmatchedDBID);
                    if ((entry.getLeaf() != 0) && (entry.getPath() == 0)) {
                        // leaf doc with no aux doc
                        log.info("Leaf has no AUX doc for DBID " + unmatchedDBID + " at position "
                                + entry.getLeaf());
                    }
                    if ((entry.getLeaf() == 0) && (entry.getPath() != 0)) {
                        hiddenDocCount++;
                    }
                }
                log.info("Cache unindexed/error doc count = " + hiddenDocCount);
                // Simple position check: every entry must sit at its own leaf or path doc id.
                boolean simpleCheckOk = true;
                for (int i = 0; i < indexedByDocId.size(); i++) {
                    CacheEntry entry = indexedByDocId.get(i);
                    if (entry != null) {
                        if ((entry.getLeaf() != i) && (entry.getPath() != i)) {
                            log.warn("Core " + newSearcher.getIndexDir());
                            log.warn("Simple cache caheck failed: Incorrect indexedByDocId at " + i);
                            log.warn(".. leaf and path doc poistion do not match the doc position .. "
                                    + indexedByDocId.get(i));
                            simpleCheckOk = false;
                            break;
                        }
                    }
                }
                // Full verification: rebuild from scratch into check arrays and compare.
                if ((simpleCheckOk == false) || forceCheckCache || checkCache.get()) {
                    ResizeableArrayList<CacheEntry> checkIndexedByDocId = (ResizeableArrayList<CacheEntry>) currentSearcher
                            .cacheLookup(ALFRESCO_ARRAYLIST_CACHE, KEY_CHECK_INDEXED_BY_DOC_ID_LIST);
                    checkIndexedByDocId.resize(newReader.maxDoc());
                    OpenBitSet checkAllLeafDocs = new OpenBitSet(newReader.maxDoc());
                    long[] checkAclIdByDocId = new long[newReader.maxDoc()];
                    long[] checkTxIdByDocId = new long[newReader.maxDoc()];
                    long[] checkAclTxIdByDocId = new long[newReader.maxDoc()];
                    buildCacheForReader(checkIndexedByDocId, checkAllLeafDocs, checkAclIdByDocId, checkTxIdByDocId,
                            checkAclTxIdByDocId, newReader, 0, newReader.maxDoc(), new HashMap<Long, CacheEntry>(),
                            ownerIdManager);
                    boolean ok = true;
                    // thisTestOk limits logging to the first mismatch of each test.
                    boolean thisTestOk = true;
                    for (int i = 0; i < checkIndexedByDocId.size(); i++) {
                        if (!EqualsHelper.nullSafeEquals(checkIndexedByDocId.get(i), indexedByDocId.get(i))) {
                            if (thisTestOk) {
                                log.warn("Core " + newSearcher.getIndexDir());
                                log.warn("Invalid indexedByDocId at " + i);
                                log.warn(".. found .. " + indexedByDocId.get(i));
                                log.warn(".. expected .. " + checkIndexedByDocId.get(i));
                                ok = false;
                                thisTestOk = false;
                            }
                        }
                    }
                    thisTestOk = true;
                    if (!checkAllLeafDocs.equals(allLeafDocs)) {
                        if (thisTestOk) {
                            log.warn("Core " + newSearcher.getIndexDir());
                            log.warn("Invalid AllLeafDocs cache");
                            ok = false;
                            thisTestOk = false;
                        }
                    }
                    thisTestOk = true;
                    for (int i = 0; i < checkAclIdByDocId.length; i++) {
                        if (checkAclIdByDocId[i] != aclIdByDocId[i]) {
                            if (thisTestOk) {
                                log.warn("Core " + newSearcher.getIndexDir());
                                log.warn("Invalid AclIdByDocId cache at " + i);
                                log.warn(".. found .. " + aclIdByDocId[i]);
                                log.warn(".. expected .. " + checkAclIdByDocId[i]);
                                try {
                                    log.warn(".. expected .. " + newSearcher.doc(i));
                                } catch (IOException e) {
                                    log.error("IO Exception", e);
                                }
                                ok = false;
                                thisTestOk = false;
                            }
                        }
                    }
                    thisTestOk = true;
                    for (int i = 0; i < checkTxIdByDocId.length; i++) {
                        if (checkTxIdByDocId[i] != txByDocId[i]) {
                            if (thisTestOk) {
                                log.warn("Core " + newSearcher.getIndexDir());
                                log.warn("Invalid txByDocId cache at " + i);
                                log.warn(".. found .. " + txByDocId[i]);
                                log.warn(".. expected .. " + checkTxIdByDocId[i]);
                                try {
                                    log.warn(".. expected .. " + newSearcher.doc(i));
                                } catch (IOException e) {
                                    log.error("IO Exception", e);
                                }
                                ok = false;
                                thisTestOk = false;
                            }
                        }
                    }
                    thisTestOk = true;
                    for (int i = 0; i < checkAclTxIdByDocId.length; i++) {
                        if (checkAclTxIdByDocId[i] != aclTxByDocId[i]) {
                            if (thisTestOk) {
                                log.warn("Core " + newSearcher.getIndexDir());
                                log.warn("Invalid aclTxByDocId cache at " + i);
                                log.warn(".. found .. " + aclTxByDocId[i]);
                                log.warn(".. expected .. " + checkAclTxIdByDocId[i]);
                                try {
                                    log.warn(".. expected .. " + newSearcher.doc(i));
                                } catch (IOException e) {
                                    log.error("IO Exception", e);
                                }
                                ok = false;
                                thisTestOk = false;
                            }
                        }
                    }
                    if (!ok) {
                        // Incremental update disagreed with the full rebuild: trust the rebuild.
                        indexedByDocId.copyFrom(checkIndexedByDocId);
                        allLeafDocs = checkAllLeafDocs;
                        aclIdByDocId = checkAclIdByDocId;
                        txByDocId = checkTxIdByDocId;
                        aclTxByDocId = checkAclTxIdByDocId;
                        log.warn("... Using recomputed cache");
                    } else {
                        log.info("... cache OK");
                    }
                }
            } catch (IllegalStateException ise) {
                log.info("Cache state error -> rebuilding", ise);
                buildCacheForReader(indexedByDocId, allLeafDocs, aclIdByDocId, txByDocId, aclTxByDocId, newReader,
                        0, newReader.maxDoc(), new HashMap<Long, CacheEntry>(), ownerIdManager);
            }
        }
    } else {
        // No previous searcher (or incremental rebuild disabled): full rebuild.
        buildCacheForReader(indexedByDocId, allLeafDocs, aclIdByDocId, txByDocId, aclTxByDocId, newReader, 0,
                newReader.maxDoc(), new HashMap<Long, CacheEntry>(), ownerIdManager);
    }
    long endTime = System.nanoTime();
    log.info("Core cache rebuilt in " + ((endTime - startTime) / (1.0e9)));
    startTime = System.nanoTime();
    // Derived caches: leaf entries re-ordered by ACL id and by owner id.
    // Sized 0 when permission checks are off (structures stay empty).
    int size = doPermissionChecks ? (int) allLeafDocs.cardinality() : 0;
    ResizeableArrayList<CacheEntry> indexedOderedByAclIdThenDoc = (ResizeableArrayList<CacheEntry>) newSearcher
            .cacheLookup(ALFRESCO_ARRAYLIST_CACHE, KEY_DBID_LEAF_PATH_BY_ACL_ID_THEN_LEAF);
    indexedOderedByAclIdThenDoc.resize(size);
    ResizeableArrayList<CacheEntry> indexedOderedByOwnerIdThenDoc = (ResizeableArrayList<CacheEntry>) newSearcher
            .cacheLookup(ALFRESCO_ARRAYLIST_CACHE, KEY_DBID_LEAF_PATH_BY_OWNER_ID_THEN_LEAF);
    indexedOderedByOwnerIdThenDoc.resize(size);
    if (doPermissionChecks) {
        int doc = -1;
        int pos = 0;
        while ((doc = allLeafDocs.nextSetBit(doc + 1)) != -1) {
            CacheEntry entry = indexedByDocId.get(doc);
            indexedOderedByAclIdThenDoc.set(pos, entry);
            indexedOderedByOwnerIdThenDoc.set(pos, entry);
            pos++;
        }
    }
    // Sort by (aclid, leaf); nulls sort to the end.
    indexedOderedByAclIdThenDoc.sort(new Comparator<CacheEntry>() {
        @Override
        public int compare(CacheEntry o1, CacheEntry o2) {
            if (o2 == null) {
                if (o1 == null) {
                    return 0;
                } else {
                    return -1; // nulls at the end
                }
            } else {
                if (o1 == null) {
                    return 1;
                } else {
                    long diff = o1.getAclid() - o2.getAclid();
                    if (diff == 0L) {
                        return o1.getLeaf() - o2.getLeaf();
                    } else {
                        return (diff > 0L) ? 1 : -1;
                    }
                }
            }
        }
    });
    // Build [start, end) ranges per ACL id over the sorted list.
    HashMap<AclLookUp, AclLookUp> aclLookUp = new HashMap<AclLookUp, AclLookUp>();
    AclLookUp currentAclLookUp = null;
    for (int i = 0; i < indexedOderedByAclIdThenDoc.size(); i++) {
        CacheEntry entry = indexedOderedByAclIdThenDoc.get(i);
        if (entry != null) {
            if (currentAclLookUp == null) {
                currentAclLookUp = new AclLookUp(entry.getAclid(), i);
            } else {
                if (currentAclLookUp.aclid == entry.aclid) {
                    // carry on
                } else {
                    // acl id has changed - new set
                    currentAclLookUp.setEnd(i);
                    AclLookUp next = new AclLookUp(entry.getAclid(), i);
                    aclLookUp.put(currentAclLookUp, currentAclLookUp);
                    currentAclLookUp = next;
                }
            }
        } else {
            // found first null we are done
            if (currentAclLookUp != null) {
                currentAclLookUp.setEnd(i);
                aclLookUp.put(currentAclLookUp, currentAclLookUp);
            }
            break;
        }
    }
    // NOTE(review): if the loop above exited via break, this re-sets the last
    // range's end to the full list size (past the nulls) — confirm intended.
    if (currentAclLookUp != null) {
        currentAclLookUp.setEnd(indexedOderedByAclIdThenDoc.size());
        aclLookUp.put(currentAclLookUp, currentAclLookUp);
    }
    // Sort by (owner, leaf); nulls sort to the end.
    indexedOderedByOwnerIdThenDoc.sort(new Comparator<CacheEntry>() {
        @Override
        public int compare(CacheEntry o1, CacheEntry o2) {
            if (o2 == null) {
                if (o1 == null) {
                    return 0;
                } else {
                    return -1; // nulls at the end
                }
            } else {
                if (o1 == null) {
                    return 1;
                } else {
                    int diff = o1.getOwner() - o2.getOwner();
                    if (diff == 0) {
                        return o1.getLeaf() - o2.getLeaf();
                    } else {
                        return diff;
                    }
                }
            }
        }
    });
    // Build [start, end) ranges per owner, keyed by the owner's name.
    HashMap<String, OwnerLookUp> ownerLookUp = new HashMap<String, OwnerLookUp>();
    OwnerLookUp currentOwnerLookUp = null;
    for (int i = 0; i < indexedOderedByOwnerIdThenDoc.size(); i++) {
        CacheEntry entry = indexedOderedByOwnerIdThenDoc.get(i);
        if (entry != null) {
            if (currentOwnerLookUp == null) {
                currentOwnerLookUp = new OwnerLookUp(entry.getOwner(), i);
            } else {
                if (currentOwnerLookUp.owner == entry.owner) {
                    // carry on
                } else {
                    // owner id has changed - new set
                    currentOwnerLookUp.setEnd(i);
                    OwnerLookUp next = new OwnerLookUp(entry.getOwner(), i);
                    try {
                        ownerLookUp.put(ownerIdManager.get(currentOwnerLookUp.owner), currentOwnerLookUp);
                    } catch (IndexOutOfBoundsException e) {
                        // Log diagnostics before propagating: owner id not known to the manager.
                        log.warn(" " + ownerIdManager);
                        log.warn(" looking for " + currentOwnerLookUp.owner);
                        throw e;
                    }
                    currentOwnerLookUp = next;
                }
            }
        } else {
            // found first null we are done
            if (currentOwnerLookUp != null) {
                currentOwnerLookUp.setEnd(i);
                try {
                    ownerLookUp.put(ownerIdManager.get(currentOwnerLookUp.owner), currentOwnerLookUp);
                } catch (IndexOutOfBoundsException e) {
                    log.warn(" " + ownerIdManager);
                    log.warn(" looking for " + currentOwnerLookUp.owner);
                    throw e;
                }
            }
            break;
        }
    }
    if (currentOwnerLookUp != null) {
        currentOwnerLookUp.setEnd(indexedOderedByOwnerIdThenDoc.size());
        try {
            ownerLookUp.put(ownerIdManager.get(currentOwnerLookUp.owner), currentOwnerLookUp);
        } catch (IndexOutOfBoundsException e) {
            log.warn(" " + ownerIdManager);
            log.warn(" looking for " + currentOwnerLookUp.owner);
            throw e;
        }
    }
    // cache readers and acl doc ids
    //HashMap<String, HashSet<Long>> readerToAclIds = new HashMap<String, HashSet<Long>>();
    // Leaf docs readable by everyone, via ACLs granting GROUP_EVERYONE.
    BitDocSet publicDocSet = new BitDocSet(new OpenBitSet(newReader.maxDoc()));
    if (doPermissionChecks) {
        try {
            HashSet<Long> globallyReadableAcls = buildReaderAclIds(newSearcher, "GROUP_EVERYONE", aclIdByDocId);
            newSearcher.cacheInsert(AlfrescoSolrEventListener.ALFRESCO_READER_TO_ACL_IDS_CACHE, "GROUP_EVERYONE",
                    globallyReadableAcls);
            AclLookUp key = new AclLookUp(0);
            for (Long longAcl : globallyReadableAcls) {
                key.setAclid(longAcl);
                AlfrescoSolrEventListener.AclLookUp value = aclLookUp.get(key);
                if (value != null) {
                    for (int i = value.getStart(); i < value.getEnd(); i++) {
                        publicDocSet.add(indexedOderedByAclIdThenDoc.get(i).getLeaf());
                    }
                }
            }
        } catch (IOException e) {
            log.error("IO Exception while warming searcher", e);
        }
    }
    // transform to readers to acl ids
    endTime = System.nanoTime();
    log.info("Derived caches rebuilt in " + ((endTime - startTime) / (1.0e9)));
    // Publish everything to the new searcher's caches.
    newSearcher.cacheInsert(ALFRESCO_CACHE, KEY_ACL_ID_BY_DOC_ID, aclIdByDocId);
    newSearcher.cacheInsert(ALFRESCO_CACHE, KEY_TX_ID_BY_DOC_ID, txByDocId);
    newSearcher.cacheInsert(ALFRESCO_CACHE, KEY_ACL_TX_ID_BY_DOC_ID, aclTxByDocId);
    // TODO: Make global readers configurable.
    globalReaders.add(PermissionService.OWNER_AUTHORITY);
    globalReaders.add(PermissionService.ADMINISTRATOR_AUTHORITY);
    globalReaders.add(AuthenticationUtil.getSystemUserName());
    newSearcher.cacheInsert(ALFRESCO_CACHE, KEY_GLOBAL_READERS, globalReaders);
    newSearcher.cacheInsert(ALFRESCO_CACHE, KEY_ALL_LEAF_DOCS, allLeafDocs);
    newSearcher.cacheInsert(ALFRESCO_CACHE, KEY_ACL_LOOKUP, aclLookUp);
    newSearcher.cacheInsert(ALFRESCO_CACHE, KEY_OWNER_LOOKUP, ownerLookUp);
    newSearcher.cacheInsert(ALFRESCO_CACHE, KEY_OWNER_ID_MANAGER, ownerIdManager);
    newSearcher.cacheInsert(ALFRESCO_CACHE, KEY_PUBLIC_DOC_SET, publicDocSet);
    try {
        if (currentSearcher != null) {
            newSearcher.warm(currentSearcher);
        }
    } catch (IOException e) {
        log.error("IO Exception while warming searcher", e);
    }
}
From source file:org.alfresco.solr.AlfrescoUpdateHandler.java
License:Open Source License
/**
 * Performs a commit (or optimize / expunge-deletes), then records this cycle's
 * add/update/delete tracking maps on the OLD searcher before opening a new one,
 * so the new-searcher listener can rebuild its caches incrementally.
 *
 * @param cmd commit command; optimize/expungeDeletes/waitSearcher flags are honoured
 * @throws IOException on index writer failures
 */
public void commit(CommitUpdateCommand cmd) throws IOException {
    if (cmd.optimize) {
        optimizeCommands.incrementAndGet();
    } else {
        commitCommands.incrementAndGet();
        if (cmd.expungeDeletes)
            expungeDeleteCommands.incrementAndGet();
    }
    Future[] waitSearcher = null;
    if (cmd.waitSearcher) {
        waitSearcher = new Future[1];
    }
    // error stays true unless we reach the end of the try block cleanly.
    boolean error = true;
    iwCommit.lock();
    try {
        log.info("start " + cmd);
        if (cmd.optimize) {
            openWriter();
            writer.optimize(cmd.maxOptimizeSegments);
        } else if (cmd.expungeDeletes) {
            openWriter();
            writer.expungeDeletes();
        }
        closeWriter();
        callPostCommitCallbacks();
        if (cmd.optimize) {
            callPostOptimizeCallbacks();
        }
        // Add tracking data to the old searcher so the incremental cache
        // rebuild in AlfrescoSolrEventListener.newSearcher can pick it up.
        RefCounted<SolrIndexSearcher> refCounted = core.getSearcher(false, true, null);
        SolrIndexSearcher oldSearcher = refCounted.get();
        oldSearcher.cacheInsert(AlfrescoSolrEventListener.ALFRESCO_CACHE,
                AlfrescoSolrEventListener.KEY_ADDED_LEAVES, addedLeaves);
        oldSearcher.cacheInsert(AlfrescoSolrEventListener.ALFRESCO_CACHE,
                AlfrescoSolrEventListener.KEY_ADDED_AUX, addedAux);
        oldSearcher.cacheInsert(AlfrescoSolrEventListener.ALFRESCO_CACHE,
                AlfrescoSolrEventListener.KEY_ADDED_ACL, addedAcl);
        oldSearcher.cacheInsert(AlfrescoSolrEventListener.ALFRESCO_CACHE,
                AlfrescoSolrEventListener.KEY_UPDATED_LEAVES, updatedLeaves);
        oldSearcher.cacheInsert(AlfrescoSolrEventListener.ALFRESCO_CACHE,
                AlfrescoSolrEventListener.KEY_UPDATED_AUX, updatedAux);
        oldSearcher.cacheInsert(AlfrescoSolrEventListener.ALFRESCO_CACHE,
                AlfrescoSolrEventListener.KEY_UPDATED_ACL, updatedAcl);
        oldSearcher.cacheInsert(AlfrescoSolrEventListener.ALFRESCO_CACHE,
                AlfrescoSolrEventListener.KEY_DELETED_LEAVES, deletedLeaves);
        oldSearcher.cacheInsert(AlfrescoSolrEventListener.ALFRESCO_CACHE,
                AlfrescoSolrEventListener.KEY_DELETED_AUX, deletedAux);
        oldSearcher.cacheInsert(AlfrescoSolrEventListener.ALFRESCO_CACHE,
                AlfrescoSolrEventListener.KEY_DELETED_ACL, deletedAcl);
        oldSearcher.cacheInsert(AlfrescoSolrEventListener.ALFRESCO_CACHE,
                AlfrescoSolrEventListener.KEY_DELETE_ALL, deleteAll);
        oldSearcher.cacheInsert(AlfrescoSolrEventListener.ALFRESCO_CACHE,
                AlfrescoSolrEventListener.KEY_CHECK_CACHE, checkCache);
        oldSearcher.cacheInsert(AlfrescoSolrEventListener.ALFRESCO_CACHE,
                AlfrescoSolrEventListener.KEY_ADDED_TX, addedTx);
        oldSearcher.cacheInsert(AlfrescoSolrEventListener.ALFRESCO_CACHE,
                AlfrescoSolrEventListener.KEY_DELETED_TX, deletedTx);
        oldSearcher.cacheInsert(AlfrescoSolrEventListener.ALFRESCO_CACHE,
                AlfrescoSolrEventListener.KEY_UPDATED_TX, updatedTx);
        oldSearcher.cacheInsert(AlfrescoSolrEventListener.ALFRESCO_CACHE,
                AlfrescoSolrEventListener.KEY_ADDED_ACL_TX, addedAclTx);
        oldSearcher.cacheInsert(AlfrescoSolrEventListener.ALFRESCO_CACHE,
                AlfrescoSolrEventListener.KEY_DELETED_ACL_TX, deletedAclTx);
        oldSearcher.cacheInsert(AlfrescoSolrEventListener.ALFRESCO_CACHE,
                AlfrescoSolrEventListener.KEY_UPDATED_ACL_TX, updatedAclTx);
        refCounted.decref();
        // open a new searcher in the sync block to avoid opening it
        // after a deleteByQuery changed the index, or in between deletes
        // and adds of another commit being done.
        core.getSearcher(true, false, waitSearcher);
        // reset commit tracking
        tracker.didCommit();
        log.info("end_commit_flush");
        // Start fresh tracking maps for the next commit cycle; the old maps
        // remain referenced by the old searcher's cache entries above.
        addedLeaves = new ConcurrentHashMap<Long, Long>();
        addedAux = new ConcurrentHashMap<Long, Long>();
        addedAcl = new ConcurrentHashMap<Long, Long>();
        updatedLeaves = new ConcurrentHashMap<Long, Long>();
        updatedAux = new ConcurrentHashMap<Long, Long>();
        updatedAcl = new ConcurrentHashMap<Long, Long>();
        deletedLeaves = new ConcurrentHashMap<Long, Long>();
        deletedAux = new ConcurrentHashMap<Long, Long>();
        deletedAcl = new ConcurrentHashMap<Long, Long>();
        deleteAll = new AtomicBoolean(false);
        checkCache = new AtomicBoolean(false);
        addedTx = new ConcurrentHashMap<Long, Long>();
        deletedTx = new ConcurrentHashMap<Long, Long>();
        updatedTx = new ConcurrentHashMap<Long, Long>();
        addedAclTx = new ConcurrentHashMap<Long, Long>();
        deletedAclTx = new ConcurrentHashMap<Long, Long>();
        updatedAclTx = new ConcurrentHashMap<Long, Long>();
        error = false;
    } finally {
        iwCommit.unlock();
        addCommands.set(0);
        deleteByIdCommands.set(0);
        deleteByQueryCommands.set(0);
        numErrors.set(error ? 1 : 0);
    }
    // if we are supposed to wait for the searcher to be registered, then we should do it
    // outside of the synchronized block so that other update operations can proceed.
    if (waitSearcher != null && waitSearcher[0] != null) {
        try {
            waitSearcher[0].get();
        } catch (InterruptedException e) {
            SolrException.log(log, e);
        } catch (ExecutionException e) {
            SolrException.log(log, e);
        }
    }
}
From source file:org.alfresco.solr.query.SolrAuthorityScorer.java
License:Open Source License
public static SolrAuthorityScorer createAuthorityScorer(Weight weight, LeafReaderContext context, SolrIndexSearcher searcher, String authority) throws IOException { Properties p = searcher.getSchema().getResourceLoader().getCoreProperties(); boolean doPermissionChecks = Boolean.parseBoolean(p.getProperty("alfresco.doPermissionChecks", "true")); Query key = new SolrAuthorityQuery(authority); DocSet answer = (DocSet) searcher.cacheLookup(CacheConstants.ALFRESCO_AUTHORITY_CACHE, key); if (answer != null) { // Answer was in the cache, so return it. return new SolrAuthorityScorer(weight, answer, context, searcher); }/*from w ww . j ava2 s. com*/ // Answer was not in cache, so build the results, cache and return. final HashSet<String> globalReaders = GlobalReaders.getReaders(); if (globalReaders.contains(authority) || (doPermissionChecks == false)) { // can read all DocSet allDocs = searcher.getDocSet(new MatchAllDocsQuery()); return new SolrAuthorityScorer(weight, allDocs, context, searcher); } // Docs for which the authority has explicit read access. DocSet readableDocSet = searcher.getDocSet(new SolrReaderQuery(authority)); // Are all doc owners granted read permissions at a global level? if (globalReaders.contains(PermissionService.OWNER_AUTHORITY)) { // Get the set of docs owned by the authority (which they can therefore read). DocSet authorityOwnedDocs = searcher.getDocSet(new SolrOwnerQuery(authority)); // Final set of docs that the authority can read. 
DocSet toCache = readableDocSet.union(authorityOwnedDocs); searcher.cacheInsert(CacheConstants.ALFRESCO_AUTHORITY_CACHE, key, toCache); return new SolrAuthorityScorer(weight, toCache, context, searcher); } else { // for that docs I own that have owner Read rights DocSet ownerReadableDocSet = searcher.getDocSet(new SolrReaderQuery(PermissionService.OWNER_AUTHORITY)); DocSet authorityOwnedDocs = searcher.getDocSet(new SolrOwnerQuery(authority)); // Docs where the authority is an owner and where owners have read rights. DocSet docsAuthorityOwnsAndCanRead = ownerReadableDocSet.intersection(authorityOwnedDocs); // Final set of docs that the authority can read. DocSet toCache = readableDocSet.union(docsAuthorityOwnsAndCanRead); searcher.cacheInsert(CacheConstants.ALFRESCO_AUTHORITY_CACHE, key, toCache); return new SolrAuthorityScorer(weight, toCache, context, searcher); } }
From source file:org.alfresco.solr.query.SolrAuthoritySetScorer.java
License:Open Source License
public static SolrAuthoritySetScorer createAuthoritySetScorer(Weight weight, LeafReaderContext context, SolrIndexSearcher searcher, String authorities) throws IOException { Properties p = searcher.getSchema().getResourceLoader().getCoreProperties(); boolean doPermissionChecks = Boolean.parseBoolean(p.getProperty("alfresco.doPermissionChecks", "true")); Query key = new SolrAuthoritySetQuery(authorities); DocSet answer = (DocSet) searcher.cacheLookup(CacheConstants.ALFRESCO_AUTHORITY_CACHE, key); if (answer != null) { // Answer was in the cache, so return it. return new SolrAuthoritySetScorer(weight, answer, context, searcher); }//from www .j av a 2s . c o m // Answer was not in cache, so build the results, cache and return. String[] auths = authorities.substring(1).split(authorities.substring(0, 1)); boolean hasGlobalRead = false; final HashSet<String> globalReaders = GlobalReaders.getReaders(); for (String auth : auths) { if (globalReaders.contains(auth)) { hasGlobalRead = true; break; } } if (hasGlobalRead || (doPermissionChecks == false)) { // can read all WrappedQuery wrapped = new WrappedQuery(new MatchAllDocsQuery()); wrapped.setCache(false); DocSet allDocs = searcher.getDocSet(wrapped); return new SolrAuthoritySetScorer(weight, allDocs, context, searcher); } // Docs for which the authorities have explicit read access. WrappedQuery wrapped; wrapped = new WrappedQuery(new SolrReaderSetQuery(authorities)); wrapped.setCache(false); DocSet readableDocSet = searcher.getDocSet(wrapped); // Are all doc owners granted read permissions at a global level? if (globalReaders.contains(PermissionService.OWNER_AUTHORITY)) { // Get the set of docs owned by the authorities (which they can therefore read). wrapped = new WrappedQuery(new SolrOwnerSetQuery(authorities)); wrapped.setCache(false); DocSet authorityOwnedDocs = searcher.getDocSet(wrapped); // Final set of docs that the authorities can read. 
DocSet toCache = readableDocSet.union(authorityOwnedDocs); searcher.cacheInsert(CacheConstants.ALFRESCO_AUTHORITY_CACHE, key, toCache); return new SolrAuthoritySetScorer(weight, toCache, context, searcher); } else { // for that docs I own that have owner Read rights wrapped = new WrappedQuery(new SolrReaderSetQuery("|" + PermissionService.OWNER_AUTHORITY)); wrapped.setCache(false); DocSet ownerReadableDocSet = searcher.getDocSet(wrapped); wrapped = new WrappedQuery(new SolrOwnerSetQuery(authorities)); wrapped.setCache(false); DocSet authorityOwnedDocs = searcher.getDocSet(wrapped); // Docs where the authority is an owner and where owners have read rights. DocSet docsAuthorityOwnsAndCanRead = ownerReadableDocSet.intersection(authorityOwnedDocs); // Final set of docs that the authorities can read. DocSet toCache = readableDocSet.union(docsAuthorityOwnsAndCanRead); searcher.cacheInsert(CacheConstants.ALFRESCO_AUTHORITY_CACHE, key, toCache); return new SolrAuthoritySetScorer(weight, toCache, context, searcher); } }
From source file:org.alfresco.solr.query.SolrCachingAuthorityScorer.java
License:Open Source License
public static SolrCachingAuthorityScorer createAuthorityScorer(SolrIndexSearcher searcher, Similarity similarity, String authority, SolrIndexReader reader) throws IOException { // Get hold of solr top level searcher // Execute query with caching // translate reults to leaf docs // build ordered doc list Query key = new SolrCachingAuthorityQuery(authority); DocSet answer = (DocSet) searcher.cacheLookup(AlfrescoSolrEventListener.ALFRESCO_AUTHORITY_CACHE, key); if (answer != null) { return new SolrCachingAuthorityScorer(similarity, answer, reader); }/*from ww w. j a v a 2 s . c o m*/ HashSet<String> globalReaders = (HashSet<String>) searcher.cacheLookup( AlfrescoSolrEventListener.ALFRESCO_CACHE, AlfrescoSolrEventListener.KEY_GLOBAL_READERS); if (globalReaders.contains(authority)) { // can read all OpenBitSet allLeafDocs = (OpenBitSet) searcher.cacheLookup(AlfrescoSolrEventListener.ALFRESCO_CACHE, AlfrescoSolrEventListener.KEY_ALL_LEAF_DOCS); DocSet toCache = new BitDocSet(allLeafDocs); searcher.cacheInsert(AlfrescoSolrEventListener.ALFRESCO_AUTHORITY_CACHE, key, toCache); return new SolrCachingAuthorityScorer(similarity, toCache, reader); } DocSet readableDocSet = searcher.getDocSet(new SolrCachingReaderQuery(authority)); if (globalReaders.contains(PermissionService.OWNER_AUTHORITY)) { DocSet authorityOwnedDocs = searcher.getDocSet(new SolrCachingOwnerQuery(authority)); DocSet toCache = readableDocSet.union(authorityOwnedDocs); searcher.cacheInsert(AlfrescoSolrEventListener.ALFRESCO_AUTHORITY_CACHE, key, toCache); return new SolrCachingAuthorityScorer(similarity, toCache, reader); } else { // for that docs I own that have owner Read rights DocSet ownerReadableDocSet = searcher .getDocSet(new SolrCachingReaderQuery(PermissionService.OWNER_AUTHORITY)); DocSet authorityOwnedDocs = searcher.getDocSet(new SolrCachingOwnerQuery(authority)); DocSet docsAuthorityOwnsAndCanRead = ownerReadableDocSet.intersection(authorityOwnedDocs); DocSet toCache = 
readableDocSet.union(docsAuthorityOwnsAndCanRead); searcher.cacheInsert(AlfrescoSolrEventListener.ALFRESCO_AUTHORITY_CACHE, key, toCache); return new SolrCachingAuthorityScorer(similarity, toCache, reader); } }
From source file:org.alfresco.solr.query.SolrCachingPathQuery.java
License:Open Source License
public Weight createWeight(IndexSearcher indexSearcher, boolean requiresScore) throws IOException { SolrIndexSearcher searcher = null; if (!(indexSearcher instanceof SolrIndexSearcher)) { throw new IllegalStateException("Must have a SolrIndexSearcher"); } else {/*from www .j a va 2s . com*/ searcher = (SolrIndexSearcher) indexSearcher; } DocSet results = (DocSet) searcher.cacheLookup(CacheConstants.ALFRESCO_PATH_CACHE, pathQuery); if (results == null) { // Cache miss: get path query results and cache them WrappedQuery wrapped = new WrappedQuery(pathQuery); wrapped.setCache(false); results = searcher.getDocSet(wrapped); searcher.cacheInsert(CacheConstants.ALFRESCO_PATH_CACHE, pathQuery, results); } return new ConstantScoreQuery(results.getTopFilter()).createWeight(searcher, false); }
From source file:org.alfresco.solr.query.SolrCachingPathScorer.java
License:Open Source License
/** * Factory method used to create {@link SolrCachingPathScorer} instances. * @param acceptDocs /*from w w w . j a v a 2 s . c o m*/ */ public static SolrCachingPathScorer create(SolrCachingPathWeight weight, LeafReaderContext context, SolrIndexSearcher searcher, SolrPathQuery wrappedPathQuery) throws IOException { DocSet results = (DocSet) searcher.cacheLookup(CacheConstants.ALFRESCO_PATH_CACHE, wrappedPathQuery); if (results == null) { // Cache miss: get path query results and cache them WrappedQuery wrapped = new WrappedQuery(wrappedPathQuery); wrapped.setCache(false); results = searcher.getDocSet(wrapped); searcher.cacheInsert(CacheConstants.ALFRESCO_PATH_CACHE, wrappedPathQuery, results); } return new SolrCachingPathScorer(weight, results, context, searcher); }
From source file:org.alfresco.solr.query.SolrDeniedScorer.java
License:Open Source License
public static SolrDeniedScorer createDenyScorer(Weight weight, LeafReaderContext context, SolrIndexSearcher searcher, String authority) throws IOException { DocSet deniedDocs = (DocSet) searcher.cacheLookup(CacheConstants.ALFRESCO_DENIED_CACHE, authority); if (deniedDocs == null) { // Cache miss: query the index for ACL docs where the denial matches the authority. DocSet aclDocs = searcher.getDocSet(new TermQuery(new Term(QueryConstants.FIELD_DENIED, authority))); // Allocate a bitset to store the results. deniedDocs = new BitDocSet(new FixedBitSet(searcher.maxDoc())); // Translate from ACL docs to real docs for (DocIterator it = aclDocs.iterator(); it.hasNext(); /**/) { int docID = it.nextDoc(); // Obtain the ACL ID for this ACL doc. long aclID = searcher.getSlowAtomicReader().getNumericDocValues(QueryConstants.FIELD_ACLID) .get(docID);/*from w ww . j a va2 s . c o m*/ SchemaField schemaField = searcher.getSchema().getField(QueryConstants.FIELD_ACLID); Query query = schemaField.getType().getFieldQuery(null, schemaField, Long.toString(aclID)); // Find real docs that match the ACL ID DocSet docsForAclId = searcher.getDocSet(query); deniedDocs = deniedDocs.union(docsForAclId); // Exclude the ACL docs from the results, we only want real docs that match. // Probably not very efficient, what we really want is remove(docID) deniedDocs = deniedDocs.andNot(aclDocs); } searcher.cacheInsert(CacheConstants.ALFRESCO_DENIED_CACHE, authority, deniedDocs); } return new SolrDeniedScorer(weight, deniedDocs, context, searcher); }
From source file:org.alfresco.solr.query.SolrDenySetScorer.java
License:Open Source License
public static SolrDenySetScorer createDenySetScorer(Weight weight, LeafReaderContext context, SolrIndexSearcher searcher, String authorities, LeafReader reader) throws IOException { DocSet deniedDocSet = (DocSet) searcher.cacheLookup(CacheConstants.ALFRESCO_DENIED_CACHE, authorities); if (deniedDocSet == null) { String[] auths = authorities.substring(1).split(authorities.substring(0, 1)); deniedDocSet = new BitDocSet(new FixedBitSet(searcher.maxDoc())); BooleanQuery.Builder bQuery = new BooleanQuery.Builder(); for (String current : auths) { bQuery.add(new TermQuery(new Term(QueryConstants.FIELD_DENIED, current)), Occur.SHOULD); }// w ww . j a va2 s . c om DocSet aclDocs = searcher.getDocSet(bQuery.build()); BooleanQuery.Builder aQuery = new BooleanQuery.Builder(); for (DocIterator it = aclDocs.iterator(); it.hasNext(); /**/) { int docID = it.nextDoc(); // Obtain the ACL ID for this ACL doc. long aclID = searcher.getSlowAtomicReader().getNumericDocValues(QueryConstants.FIELD_ACLID) .get(docID); SchemaField schemaField = searcher.getSchema().getField(QueryConstants.FIELD_ACLID); Query query = schemaField.getType().getFieldQuery(null, schemaField, Long.toString(aclID)); aQuery.add(query, Occur.SHOULD); if ((aQuery.build().clauses().size() > 999) || !it.hasNext()) { DocSet docsForAclId = searcher.getDocSet(aQuery.build()); deniedDocSet = deniedDocSet.union(docsForAclId); aQuery = new BooleanQuery.Builder(); } } // Exclude the ACL docs from the results, we only want real docs that match. // Probably not very efficient, what we really want is remove(docID) deniedDocSet = deniedDocSet.andNot(aclDocs); searcher.cacheInsert(CacheConstants.ALFRESCO_DENIED_CACHE, authorities, deniedDocSet); } // TODO: cache the full set? e.g. searcher.cacheInsert(CacheConstants.ALFRESCO_READERSET_CACHE, authorities, readableDocSet) // plus check of course, for presence in cache at start of method. return new SolrDenySetScorer(weight, deniedDocSet, context, searcher); }
From source file:org.alfresco.solr.query.SolrDenySetScorer2.java
License:Open Source License
/**
 * Creates a scorer over the set of documents denied to any of the given
 * authorities, using a two-phase ACL-ID scan instead of per-ACL queries.
 * <p>
 * {@code authorities} is a packed list: its first character is the delimiter
 * and the remainder is the delimited authority names. Phase 1 collects the
 * ACL IDs of every ACL doc matching any authority; phase 2 walks every leaf's
 * ACLID doc values and marks each doc whose ACL ID is in that set. ACL docs
 * are then subtracted, and the result is cached keyed by the packed string.
 *
 * @param weight the owning weight
 * @param context the leaf reader context being scored
 * @param searcher the searcher whose denied-cache is consulted
 * @param authorities delimiter-prefixed list of authorities (also the cache key)
 * @param reader unused here; presumably kept for interface compatibility — TODO confirm
 */
public static SolrDenySetScorer2 createDenySetScorer(Weight weight, LeafReaderContext context,
        SolrIndexSearcher searcher, String authorities, LeafReader reader) throws IOException {
    DocSet deniedDocSet = (DocSet) searcher.cacheLookup(CacheConstants.ALFRESCO_DENIED_CACHE, authorities);
    if (deniedDocSet == null) {
        // First char is the delimiter; the rest is the delimited authority list.
        String[] auths = authorities.substring(1).split(authorities.substring(0, 1));
        deniedDocSet = new BitDocSet(new FixedBitSet(searcher.maxDoc()));

        // ACL docs where any of the authorities is denied (Solr's own
        // query-result cache deliberately bypassed).
        BooleanQuery.Builder bQuery = new BooleanQuery.Builder();
        for (String current : auths) {
            bQuery.add(new TermQuery(new Term(QueryConstants.FIELD_DENIED, current)), Occur.SHOULD);
        }
        WrappedQuery wrapped = new WrappedQuery(bQuery.build());
        wrapped.setCache(false);
        DocSet aclDocs = searcher.getDocSet(wrapped);

        // Phase 1: collect the ACL IDs of all matching ACL docs.
        // getLong is a sibling helper — presumably a boxing/caching wrapper; verify in this file.
        HashSet<Long> aclsFound = new HashSet<Long>(aclDocs.size());
        NumericDocValues aclDocValues = searcher.getSlowAtomicReader()
                .getNumericDocValues(QueryConstants.FIELD_ACLID);
        for (DocIterator it = aclDocs.iterator(); it.hasNext(); /**/) {
            int docID = it.nextDoc();
            // Obtain the ACL ID for this ACL doc.
            long aclID = aclDocValues.get(docID);
            aclsFound.add(getLong(aclID));
        }

        // Phase 2: scan every leaf's ACLID doc values and mark docs whose
        // ACL ID was collected above. Doc IDs are leaf-relative, hence docBase.
        if (aclsFound.size() > 0) {
            for (LeafReaderContext readerContext : searcher.getSlowAtomicReader().leaves()) {
                int maxDoc = readerContext.reader().maxDoc();
                NumericDocValues fieldValues = DocValuesCache.getNumericDocValues(QueryConstants.FIELD_ACLID,
                        readerContext.reader());
                if (fieldValues != null) {
                    for (int i = 0; i < maxDoc; i++) {
                        long aclID = fieldValues.get(i);
                        Long key = getLong(aclID);
                        if (aclsFound.contains(key)) {
                            deniedDocSet.add(readerContext.docBase + i);
                        }
                    }
                }
            }
        }
        // Exclude the ACL docs from the results, we only want real docs that match.
        // Probably not very efficient, what we really want is remove(docID)
        deniedDocSet = deniedDocSet.andNot(aclDocs);
        searcher.cacheInsert(CacheConstants.ALFRESCO_DENIED_CACHE, authorities, deniedDocSet);
    }
    // TODO: cache the full set? e.g. searcher.cacheInsert(CacheConstants.ALFRESCO_READERSET_CACHE, authorities, readableDocSet)
    // plus check of course, for presence in cache at start of method.
    return new SolrDenySetScorer2(weight, deniedDocSet, context, searcher);
}