Example usage for org.apache.lucene.index IndexReader close

Introduction

This page collects example usages of org.apache.lucene.index IndexReader.close.

Prototype

@Override
public final synchronized void close() throws IOException 

Document

Closes files associated with this index.
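Since IndexReader implements Closeable, a try-with-resources block (Java 7+) is the simplest way to guarantee the call. A minimal sketch, assuming a Lucene 5.x setup where DirectoryReader.open and a Path-based FSDirectory.open are available; the "index" path is a placeholder:

import java.nio.file.Paths;

import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.store.FSDirectory;

public class CloseReaderSketch {
    public static void main(String[] args) throws Exception {
        // try-with-resources calls reader.close() on every exit path
        try (IndexReader reader = DirectoryReader.open(FSDirectory.open(Paths.get("index")))) {
            System.out.println("live docs in index: " + reader.numDocs());
        } // reader, and the index files it holds, are closed here
    }
}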

Usage

From source file:nl.strohalm.cyclos.utils.lucene.LuceneQueryHandler.java

License:Open Source License

private <E extends Entity & Indexable> List<E> listOrPage(final Class<E> entityType, final Query query,
        final Filter filter, final Sort sort, final ResultType resultType, final PageParameters pageParameters,
        final Relationship... fetch) {
    IndexSearcher searcher = null;
    // Prepare the parameters
    IndexReader reader;
    try {
        reader = indexHandler.openReader(entityType);
    } catch (final DaoException e) {
        // Probably index files don't exist
        return Collections.emptyList();
    }
    final int firstResult = pageParameters == null ? 0 : pageParameters.getFirstResult();
    int maxResults = pageParameters == null ? Integer.MAX_VALUE : pageParameters.getMaxResults() + firstResult;
    try {
        searcher = new IndexSearcher(reader);
        if (maxResults == 0 && resultType == ResultType.PAGE) {
            // We just want the total hit count.
            TotalHitCountCollector collector = new TotalHitCountCollector();
            searcher.search(query, filter, collector);
            int totalHits = collector.getTotalHits();
            return new PageImpl<E>(pageParameters, totalHits, Collections.<E>emptyList());
        } else {
            if (maxResults == 0) {
                maxResults = Integer.MAX_VALUE;
            }
            // Run the search
            TopDocs topDocs;
            if (sort == null || ArrayUtils.isEmpty(sort.getSort())) {
                topDocs = searcher.search(query, filter, maxResults);
            } else {
                topDocs = searcher.search(query, filter, maxResults, sort);
            }

            // Build the list
            ScoreDoc[] scoreDocs = topDocs.scoreDocs;
            // Capacity hint: at most (scoreDocs.length - firstResult) entries survive the offset
            List<E> list = new ArrayList<E>(Math.max(scoreDocs.length - firstResult, 0));
            for (int i = firstResult; i < scoreDocs.length; i++) {
                ScoreDoc scoreDoc = scoreDocs[i];
                E entity = toEntity(reader, scoreDoc.doc, entityType, fetch);
                if (entity != null) {
                    list.add(entity);
                }
            }

            // When result type is page, get the additional data
            if (resultType == ResultType.PAGE) {
                list = new PageImpl<E>(pageParameters, topDocs.totalHits, list);
            }
            return list;
        }
    } catch (final EntityNotFoundException e) {
        throw new ValidationException("general.error.indexedRecordNotFound");
    } catch (ApplicationException e) {
        throw e;
    } catch (final Exception e) {
        throw new DaoException(e);
    } finally {
        // Close resources
        try {
            searcher.close();
        } catch (final Exception e) {
            // Silently ignore
        }
        try {
            reader.close();
        } catch (final Exception e) {
            // Silently ignore
        }
    }
}
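The finally block above closes the searcher and reader while swallowing close-time exceptions, so a failed close cannot mask an exception already in flight. A sketch of the same idiom factored into a helper; the closeQuietly name is ours, not part of the example (note that from Lucene 4 on, IndexSearcher no longer has a close() method, so only the reader would need this):

import java.io.Closeable;

final class CloseUtil {
    // Hypothetical helper mirroring the finally block above.
    static void closeQuietly(Closeable resource) {
        if (resource == null) {
            return; // e.g. the searcher was never assigned
        }
        try {
            resource.close();
        } catch (Exception e) {
            // Silently ignore
        }
    }
}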

From source file:nmsu.cs.TFIDFVector.java

License:Open Source License

/**
 * Calculate likelihood from the index.
 * @param indexDir
 * @param lambda
 */
public void calLikelihoodFromIndex(String indexDir, double lambda) {
    try {
        IndexReader ir = IndexReader.open(FSDirectory.open(new File(indexDir)));
        IndexSearcher is = new IndexSearcher(ir);
        int numDocs = ir.maxDoc();

        double LLH = 0;

        //vocabulary list
        List<String> vocab = new ArrayList<String>();

        TermEnum te = ir.terms();
        //create vocabulary
        while (te.next()) {
            String term = te.term().text();
            //            System.out.println(term);
            vocab.add(term);
        }
        TFIDFVector.vocabulary = vocab;

        //dataset id to index id
        Map<Integer, Integer> idMap = new HashMap<Integer, Integer>();

        for (int i = 0; i < numDocs; i++) {
            Document doc = ir.document(i);
            idMap.put(Integer.parseInt(doc.get("docid")), i);
        }

        //o -> a -> o'
        Map<Integer, Map<Integer, Map<Integer, Double>>> cosineSimMap = new HashMap<Integer, Map<Integer, Map<Integer, Double>>>();
        // (o | o') dataset id -> tfidf vector
        Map<Integer, TFIDFVector> docVectorMap = new HashMap<Integer, TFIDFVector>();
        // o -> a -> vector
        Map<Integer, Map<Integer, TFIDFVector>> docAspectVectorMap = new HashMap<Integer, Map<Integer, TFIDFVector>>();

        Set<Integer> citedSet = new HashSet<Integer>();
        //for all citing document
        for (Map.Entry<Integer, List<Integer>> entry : rawdata.pubId2CiteIds.entrySet()) {//llh for citing documents
            int citingDatasetID = entry.getKey();
            int citingIndexID = idMap.get(citingDatasetID);

            //set up citing document vector
            TFIDFVector citingVector = BaseLineMethod.getFullTextTFIDFVector(docVectorMap, ir, citingDatasetID,
                    citingIndexID, numDocs);
            float sum = citingVector.sum();

            //            System.out.println(Debugger.getCallerPosition()+" "+citingDatasetID);

            List<Integer> refList = entry.getValue();
            //for all aspects
            for (Integer aspectID : rawdata.id2Aspect.keySet()) {
                String aspect = rawdata.id2Aspect.get(aspectID);
                //set up citing document aspect vector
                double aspectSim = 0;
                if (rawdata.id2Docs.get(citingDatasetID).getText().get(aspectID).length() != 0) {
                    TFIDFVector citingAspectVector = BaseLineMethod.getAspectTFIDFVector(docAspectVectorMap, ir,
                            citingDatasetID, citingIndexID, aspectID, numDocs);
                    citingAspectVector.normalizedBy(sum);

                    int refSize = refList.size();
                    TFIDFVector[] citedVectors = new TFIDFVector[refSize];
                    double[] cosineSims = new double[refSize];
                    int count = 0;

                    //for all cited documents of this citing document
                    for (Integer citedDatasetID : refList) {
                        citedSet.add(citedDatasetID);
                        //set up cited document vector
                        int citedIndexID = idMap.get(citedDatasetID);
                        TFIDFVector citedVector = BaseLineMethod.getFullTextTFIDFVector(docVectorMap, ir,
                                citedDatasetID, citedIndexID, numDocs);
                        citedVector.normalize();

                        aspectSim = TFIDFVector.computeCosineSim(citedVector, citingAspectVector);
                        //                     System.out.println(Debugger.getCallerPosition()+"\t\t"+aspectSim);
                        System.out.println(
                                citingDatasetID + "\t" + aspectID + "\t" + citedDatasetID + "\t" + aspectSim);
                        citedVectors[count] = citedVector;
                        cosineSims[count] = aspectSim;
                        count++;
                    }
                    double aspectLLH = citingAspectVector.posteriorLLH(citedVectors, cosineSims, lambda);
                    LLH += aspectLLH;
                }
                //                  Util.update3Map(cosineSimMap, citingDatasetID, aspectID, citedDatasetID, aspectSim);
            }
        }

        for (Integer citedDatasetID : citedSet) {
            int citedIndexID = idMap.get(citedDatasetID);
            TFIDFVector citedVector = BaseLineMethod.getFullTextTFIDFVector(docVectorMap, ir, citedDatasetID,
                    citedIndexID, numDocs);
            citedVector.normalize();
            LLH += citedVector.priorLLH();
        }

        System.out.println(LLH);
        is.close();
        ir.close();

    } catch (CorruptIndexException e) {
        // TODO Auto-generated catch block
        e.printStackTrace();
    } catch (IOException e) {
        // TODO Auto-generated catch block
        e.printStackTrace();
    }
}
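This example builds its vocabulary with the Lucene 3.x TermEnum API. Lucene 4.x replaced it with per-field TermsEnum; a hedged sketch of the equivalent loop for one field (the field name is a caller-supplied placeholder, and unlike 3.x TermEnum this covers a single field rather than all of them):

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.MultiFields;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.util.BytesRef;

final class VocabularySketch {
    static List<String> buildVocabulary(IndexReader ir, String field) throws IOException {
        List<String> vocab = new ArrayList<String>();
        Terms terms = MultiFields.getTerms(ir, field); // null when the field has no terms
        if (terms != null) {
            TermsEnum te = terms.iterator(null); // Lucene 4.x signature; no-arg in 5+
            BytesRef term;
            while ((term = te.next()) != null) {
                vocab.add(term.utf8ToString());
            }
        }
        return vocab;
    }
}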

From source file:nyu.wse.Retriever.java

License:Apache License

/** Simple command-line based search demo. */
public static void main(String[] args) throws Exception {
    String usage = "Usage:\tjava org.apache.lucene.demo.SearchFiles [-index dir] [-field f] [-repeat n] [-queries file] [-query string] [-raw] [-paging hitsPerPage]\n\nSee http://lucene.apache.org/core/4_1_0/demo/ for details.";
    if (args.length > 0 && ("-h".equals(args[0]) || "-help".equals(args[0]))) {
        System.out.println(usage);
        System.exit(0);
    }

    String index = "index";
    String field = "contents";
    String queries = null;
    int repeat = 0;
    boolean raw = false;
    String queryString = null;
    int hitsPerPage = 10;

    for (int i = 0; i < args.length; i++) {
        if ("-index".equals(args[i])) {
            index = args[i + 1];
            i++;
        } else if ("-field".equals(args[i])) {
            field = args[i + 1];
            i++;
        } else if ("-queries".equals(args[i])) {
            queries = args[i + 1];
            i++;
        } else if ("-query".equals(args[i])) {
            queryString = args[i + 1];
            i++;
        } else if ("-repeat".equals(args[i])) {
            repeat = Integer.parseInt(args[i + 1]);
            i++;
        } else if ("-raw".equals(args[i])) {
            raw = true;
        } else if ("-paging".equals(args[i])) {
            hitsPerPage = Integer.parseInt(args[i + 1]);
            if (hitsPerPage <= 0) {
                System.err.println("There must be at least 1 hit per page.");
                System.exit(1);
            }
            i++;
        }
    }

    IndexReader reader = DirectoryReader.open(FSDirectory.open(Paths.get(index)));
    IndexSearcher searcher = new IndexSearcher(reader);
    Analyzer analyzer = new StandardAnalyzer();

    BufferedReader in = null;
    if (queries != null) {
        in = Files.newBufferedReader(Paths.get(queries), StandardCharsets.UTF_8);
    } else {
        in = new BufferedReader(new InputStreamReader(System.in, StandardCharsets.UTF_8));
    }
    QueryParser parser = new QueryParser(field, analyzer);
    while (true) {
        if (queries == null && queryString == null) { // prompt the user
            System.out.println("Enter query: ");
        }

        String line = queryString != null ? queryString : in.readLine();

        if (line == null) {
            break;
        }

        line = line.trim();
        if (line.length() == 0) {
            break;
        }
        Query query = parser.parse(line);
        System.out.print("<h1>Results for query <u>" + query.toString(field) + "</u>");
        if (queries != null) {
            System.out.print(" in directoty <u>" + queries + "</u>");
        }
        System.out.println("</h1>");
        if (repeat > 0) { // repeat & time as benchmark
            Date start = new Date();
            for (int i = 0; i < repeat; i++) {
                searcher.search(query, 100);
            }
            Date end = new Date();
            System.out.println("Time: " + (end.getTime() - start.getTime()) + "ms");
        }

        doPagingSearch(in, searcher, query, hitsPerPage, raw, queries == null && queryString == null);

        if (queryString != null) {
            break;
        }
    }
    reader.close();
}

From source file:org.aksw.lucene.extractor.DocumentExtractor.java

License:Apache License

/**
 * Filtering all streets by city.
 * @param cityFilter
 * @return
 * @throws IOException
 */
private List<Place> getPlaces(String cityFilter) throws IOException {

    List<Place> result = new ArrayList<Place>();

    LOG.debug("Reading streets by city...");
    LOG.debug("City:%s".format(city));

    IndexReader reader = IndexReader.open(FSDirectory.open(indexDirectory));
    IndexSearcher searcher = new IndexSearcher(reader);

    BooleanQuery bq = new BooleanQuery();
    bq.add(new TermQuery(new Term(IndexField.CITY, cityFilter.toLowerCase())), BooleanClause.Occur.MUST);

    ScoreDoc[] hits = searcher.search(bq, Integer.MAX_VALUE).scoreDocs;

    for (int i = 0; i < hits.length; i++) {

        Document doc = searcher.doc(hits[i].doc);

        String street = doc.get(IndexField.DESCRIPTION).toLowerCase();
        String city = doc.get(IndexField.CITY).toLowerCase();
        Place p = new Place();
        p.setName(street);
        p.setCity(city);
        result.add(p);

    }

    reader.close();

    return result;

}
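Note that reader.close() above only runs when the search succeeds; if searcher.search throws, the index files stay open. A sketch of the same method with the close moved into a finally block, reusing the Place, IndexField and indexDirectory members from the example (same Lucene 3.x API):

private List<Place> getPlaces(String cityFilter) throws IOException {
    List<Place> result = new ArrayList<Place>();
    IndexReader reader = IndexReader.open(FSDirectory.open(indexDirectory));
    try {
        IndexSearcher searcher = new IndexSearcher(reader);
        BooleanQuery bq = new BooleanQuery();
        bq.add(new TermQuery(new Term(IndexField.CITY, cityFilter.toLowerCase())), BooleanClause.Occur.MUST);
        for (ScoreDoc hit : searcher.search(bq, Integer.MAX_VALUE).scoreDocs) {
            Document doc = searcher.doc(hit.doc);
            Place p = new Place();
            p.setName(doc.get(IndexField.DESCRIPTION).toLowerCase());
            p.setCity(doc.get(IndexField.CITY).toLowerCase());
            result.add(p);
        }
    } finally {
        reader.close(); // runs even when the search throws, releasing the files
    }
    return result;
}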

From source file:org.aksw.lucene.search.IndexSearch.java

License:Apache License

@Override
public List<Place> findByDescription(Integer hitsPerPage, String indexPath, String queryString)
        throws IOException, ParseException {

    List<Place> result = new ArrayList<Place>();

    Query query = new QueryParser(Version.LUCENE_43, IndexField.DESCRIPTION, analyzer).parse(queryString);

    File indexDir = new File(indexPath);

    IndexReader reader = IndexReader.open(FSDirectory.open(indexDir));

    IndexSearcher searcher = new IndexSearcher(reader);

    TopScoreDocCollector collector = TopScoreDocCollector.create(hitsPerPage, true);

    searcher.search(query, collector);

    ScoreDoc[] hits = collector.topDocs().scoreDocs;

    for (int i = 0; i < hits.length; ++i) {
        int docId = hits[i].doc;
        Document doc = searcher.doc(docId);

        Place place = new Place();
        place.setName(doc.get(IndexField.DESCRIPTION));
        place.setLatitude(doc.get(IndexField.LATITUDE));
        place.setLongitude(doc.get(IndexField.LONGITUDE));
        place.setUrl(doc.get(IndexField.URL));
        place.setTypes(doc.get(IndexField.TYPES));
        place.setCity(doc.get(IndexField.CITY));

        result.add(place);

    }
    reader.close();

    return result;
}

From source file:org.aksw.lucene.search.IndexSearch.java

License:Apache License

@Override
public List<Place> getDocsByDescription(Integer hitsPerPage, String indexPath, String queryString)
        throws IOException, ParseException {

    String[] parts = queryString.split(",");
    String street = parts[0].trim();
    String city = parts[1].trim();
    List<Place> result = new ArrayList<Place>();
    File indexDir = new File(indexPath);
    IndexReader reader = IndexReader.open(FSDirectory.open(indexDir));
    IndexSearcher searcher = new IndexSearcher(reader);

    BooleanQuery bq = new BooleanQuery();
    bq.add(new TermQuery(new Term(IndexField.CITY, city.toLowerCase())), BooleanClause.Occur.MUST);
    bq.add(new TermQuery(new Term(IndexField.DESCRIPTION, street.toLowerCase())), BooleanClause.Occur.MUST);

    ScoreDoc[] hits = searcher.search(bq, Integer.MAX_VALUE).scoreDocs;

    for (int i = 0; i < hits.length; ++i) {
        int docId = hits[i].doc;
        Document doc = searcher.doc(docId);

        List<String> docs = new ArrayList<String>();

        Place place = new Place();
        place.setName(doc.get(IndexField.DESCRIPTION));
        place.setLatitude(doc.get(IndexField.LATITUDE));
        place.setLongitude(doc.get(IndexField.LONGITUDE));
        place.setUrl(doc.get(IndexField.URL));
        place.setTypes(doc.get(IndexField.TYPES));
        place.setCity(doc.get(IndexField.CITY));

        for (IndexableField f : doc.getFields(IndexField.DOCUMENT))
            docs.add(f.stringValue());
        place.setDocuments(docs);

        result.add(place);

    }
    reader.close();

    return result;

}

From source file:org.ala.lucene.Autocompleter.java

License:Open Source License

@SuppressWarnings("unchecked")
public void reIndex(Directory sourceDirectory, String fieldToAutocomplete, boolean createNewIndex)
        throws CorruptIndexException, IOException {
    // build a dictionary (from the spell package)
    IndexReader sourceReader = IndexReader.open(sourceDirectory);

    LuceneDictionary dict = new LuceneDictionary(sourceReader, fieldToAutocomplete);

    // code from
    // org.apache.lucene.search.spell.SpellChecker.indexDictionary(
    // Dictionary)
    IndexWriter.unlock(autoCompleteDirectory);

    // use a custom analyzer so we can do EdgeNGramFiltering
    IndexWriterConfig indexWriterConfig = new IndexWriterConfig(SolrUtils.BIE_LUCENE_VERSION, new Analyzer() {
        protected TokenStreamComponents createComponents(String fieldName, Reader reader) {
            final StandardTokenizer src = new StandardTokenizer(SolrUtils.BIE_LUCENE_VERSION, reader);
            // Chain the filters off src, the tokenizer registered with TokenStreamComponents
            TokenStream result = new StandardFilter(SolrUtils.BIE_LUCENE_VERSION, src);
            result = new LowerCaseFilter(SolrUtils.BIE_LUCENE_VERSION, result);
            result = new StopFilter(SolrUtils.BIE_LUCENE_VERSION, result,
                    new CharArraySet(SolrUtils.BIE_LUCENE_VERSION,
                            new HashSet<String>(Arrays.asList(ENGLISH_STOP_WORDS)), true));
            result = new EdgeNGramTokenFilter(result, Side.FRONT, 1, 20);
            return new TokenStreamComponents(src, result) {
                @Override
                protected void setReader(final Reader reader) throws IOException {
                    super.setReader(reader);
                }

            };
        }
        //            public TokenStream tokenStream(String fieldName, Reader reader) {
        //            TokenStream result = new StandardTokenizer(SolrUtils.BIE_LUCENE_VERSION, reader);
        //            
        //            result = new StandardFilter(SolrUtils.BIE_LUCENE_VERSION, result);
        //            result = new LowerCaseFilter(SolrUtils.BIE_LUCENE_VERSION, result);
        //            //result = new ISOLatin1AccentFilter(result);
        //            result = new StopFilter(SolrUtils.BIE_LUCENE_VERSION, result, new HashSet<String>(Arrays.asList(ENGLISH_STOP_WORDS)));
        //            result = new EdgeNGramTokenFilter(result, Side.FRONT,1, 20);
        //            
        //            return result;
        //          }
    });
    if (createNewIndex) {
        indexWriterConfig.setOpenMode(IndexWriterConfig.OpenMode.CREATE);
    } else {
        indexWriterConfig.setOpenMode(IndexWriterConfig.OpenMode.CREATE_OR_APPEND);
    }
    indexWriterConfig.setMaxBufferedDocs(150);
    IndexWriter writer = new IndexWriter(autoCompleteDirectory, indexWriterConfig);
    //        writer.setMergeFactor(300);

    // go through every word, storing the original word (incl. n-grams)
    // and the number of times it occurs
    Map<String, Integer> wordsMap = new HashMap<String, Integer>();

    Iterator<String> iter = (Iterator<String>) dict.getWordsIterator();
    while (iter.hasNext()) {
        String word = iter.next();

        int len = word.length();
        if (len < 3) {
            continue; // too short, so we skip it; "too long" is fine
        }

        if (wordsMap.containsKey(word)) {
            throw new IllegalStateException("This should never happen in Lucene 2.3.2");
            // wordsMap.put(word, wordsMap.get(word) + 1);
        } else {
            // use the number of documents this word appears in
            wordsMap.put(word, sourceReader.docFreq(new Term(fieldToAutocomplete, word)));
        }
    }

    for (String word : wordsMap.keySet()) {
        // ok index the word
        Document doc = new Document();
        doc.add(new Field(SOURCE_WORD_FIELD, word, Field.Store.YES, Field.Index.NOT_ANALYZED)); // orig term
        doc.add(new Field(GRAMMED_WORDS_FIELD, word, Field.Store.YES, Field.Index.ANALYZED)); // grammed
        doc.add(new Field(COUNT_FIELD, Integer.toString(wordsMap.get(word)), Field.Store.NO,
                Field.Index.NOT_ANALYZED)); // count

        writer.addDocument(doc);
    }

    sourceReader.close();

    // close writer
    writer.forceMerge(1);
    writer.close();

    // re-open our reader
    reOpenReader();
}

From source file:org.alfresco.repo.search.impl.lucene.ADMLuceneIndexerImpl.java

License:Open Source License

private void addRootNodesToDeletionList() {
    IndexReader mainReader = null;
    try {
        try {
            mainReader = getReader();
            TermDocs td = mainReader.termDocs(new Term("ISROOT", "T"));
            while (td.next()) {
                int doc = td.doc();
                Document document = mainReader.document(doc);
                String id = document.get("ID");
                NodeRef ref = new NodeRef(id);
                deleteImpl(ref.toString(), getDeltaReader(), mainReader);
            }
            td.close();
        } catch (IOException e) {
            throw new LuceneIndexException("Failed to delete all primary nodes", e);
        }
    } finally {
        if (mainReader != null) {
            try {
                mainReader.close();
            } catch (IOException e) {
                throw new LuceneIndexException("Filed to close main reader", e);
            }
        }
        try {
            closeDeltaReader();
        } catch (Exception e) {
            s_logger.warn("Failed to close delta reader", e);
        }
    }
}

From source file:org.alfresco.repo.search.impl.lucene.ADMLuceneIndexerImpl.java

License:Open Source License

void doFTSIndexCommit() throws LuceneIndexException {
    IndexReader mainReader = null;
    IndexReader deltaReader = null;
    IndexSearcher mainSearcher = null;
    IndexSearcher deltaSearcher = null;

    try {
        try {
            mainReader = getReader();
            deltaReader = getDeltaReader();
            mainSearcher = new IndexSearcher(mainReader);
            deltaSearcher = new IndexSearcher(deltaReader);

            for (Map.Entry<String, Deque<Helper>> entry : toFTSIndex.entrySet()) {
                // Delete both the document and the supplementary FTSSTATUS documents (if there are any)
                deletions.add(entry.getKey());
                for (Helper helper : entry.getValue()) {
                    deletions.add(helper.id);
                }
            }

        } finally {
            if (deltaSearcher != null) {
                try {
                    deltaSearcher.close();
                } catch (IOException e) {
                    s_logger.warn("Failed to close delta searcher", e);
                }
            }
            if (mainSearcher != null) {
                try {
                    mainSearcher.close();
                } catch (IOException e) {
                    s_logger.warn("Failed to close main searcher", e);
                }
            }
            try {
                closeDeltaReader();
            } catch (LuceneIndexException e) {
                s_logger.warn("Failed to close delta reader", e);
            }
            if (mainReader != null) {
                try {
                    mainReader.close();
                } catch (IOException e) {
                    s_logger.warn("Failed to close main reader", e);
                }
            }
        }

        setInfo(docs, getDeletions(), getContainerDeletions(), getDeleteOnlyNodes());
        // mergeDeltaIntoMain(new LinkedHashSet<Term>());
    } catch (IOException e) {
        // If anything goes wrong we try and do a roll back
        rollback();
        throw new LuceneIndexException("Commit failed", e);
    } catch (LuceneIndexException e) {
        // If anything goes wrong we try and do a roll back
        rollback();
        throw new LuceneIndexException("Commit failed", e);
    } finally {
        // Make sure we tidy up
        // deleteDelta();
    }

}

From source file:org.alfresco.repo.search.impl.lucene.ADMLuceneIndexerImpl.java

License:Open Source License

/**
 * @throws LuceneIndexException
 */
public void flushPending() throws LuceneIndexException {
    IndexReader mainReader = null;
    try {
        saveDelta();

        if (commandList.isEmpty()) {
            return;
        }

        Map<String, Action> nodeActionMap = new LinkedHashMap<String, Action>(commandList.size() * 2);

        // First, apply deletions and work out a 'flattened' list of reindex actions
        mainReader = getReader();
        IndexReader deltaReader = getDeltaReader();
        for (Command<NodeRef> command : commandList) {
            if (s_logger.isDebugEnabled()) {
                s_logger.debug(command.action + ": " + command.ref);
            }
            String nodeRef = command.ref.toString();
            switch (command.action) {
            case INDEX:
                // No deletions
                if (nodeActionMap.get(nodeRef) != Action.CASCADEREINDEX) {
                    nodeActionMap.put(nodeRef, Action.INDEX);
                }
                break;
            case REINDEX:
                // Remove from delta if present
                if (!deleteLeafOnly(nodeRef, deltaReader, true)) {
                    // Only mask out the node if it is present in the main index
                    deletions.add(nodeRef);
                }
                if (!nodeActionMap.containsKey(nodeRef)) {
                    nodeActionMap.put(nodeRef, Action.REINDEX);
                }
                break;
            case DELETE:
                // First ensure leaves with secondary references to the deleted node are reindexed
                Set<String> nodeRefSet = Collections.singleton(nodeRef);
                deleteReference(nodeRefSet, deltaReader, true);
                Set<String> temp = deleteReference(nodeRefSet, mainReader, false);
                if (!temp.isEmpty()) {
                    deletions.addAll(temp);
                    for (String ref : temp) {
                        if (!nodeActionMap.containsKey(ref)) {
                            nodeActionMap.put(ref, Action.REINDEX);
                        }
                    }
                }
                // Now fall through to cascade reindex case
            case CASCADEREINDEX:
                deleteContainerAndBelow(nodeRef, deltaReader, true, true);
                // Only mask out the container if it is present in the main index
                temp = deleteContainerAndBelow(nodeRef, mainReader, false, true);
                if (!temp.isEmpty()) {
                    containerDeletions.add(nodeRef);
                }
                // Only mask out the node if it is present in the main index
                if (temp.contains(nodeRef)) {
                    deletions.add(nodeRef);
                }
                nodeActionMap.put(nodeRef, Action.CASCADEREINDEX);
                break;
            }
        }

        // Now reindex what needs indexing!
        Set<Pair<Boolean, Path>> pathsToRegenerate = new LinkedHashSet<Pair<Boolean, Path>>(97);
        Map<NodeRef, List<ChildAssociationRef>> childAssociationsSinceFlush = new HashMap<NodeRef, List<ChildAssociationRef>>(
                97);

        // First do the reading
        List<Document> docs = new LinkedList<Document>();
        for (Map.Entry<String, Action> entry : nodeActionMap.entrySet()) {
            String nodeRef = entry.getKey();
            try {
                switch (entry.getValue()) {
                case INDEX:
                    docs.addAll(readDocuments(nodeRef, FTSStatus.New, false, true, false, pathsToRegenerate,
                            childAssociationsSinceFlush, deltaReader, mainReader));
                    break;
                case REINDEX:
                    docs.addAll(readDocuments(nodeRef, FTSStatus.Dirty, false, false, false, pathsToRegenerate,
                            childAssociationsSinceFlush, deltaReader, mainReader));
                    break;
                case CASCADEREINDEX:
                    // Add the nodes for index
                    docs.addAll(readDocuments(nodeRef, FTSStatus.Dirty, false, true, true, pathsToRegenerate,
                            childAssociationsSinceFlush, deltaReader, mainReader));
                    break;
                }
            } catch (InvalidNodeRefException e) {
                // The node does not exist
            }
        }
        closeDeltaReader();

        // Now the writings
        IndexWriter writer = getDeltaWriter();
        for (Document doc : docs) {
            try {
                writer.addDocument(doc);
            } catch (IOException e) {
                throw new LuceneIndexException("Failed to add document to index", e);
            }
        }

        // Regenerate all the required paths, accounting for cascading operations and avoiding duplicates
        for (final Pair<Boolean, Path> pathPair : pathsToRegenerate) {
            Document directoryEntry;
            // ETHREEOH-2014 / ALF-17681: dictionary access should be in context of tenant (eg. full reindex with MT
            // dynamic models)
            if (tenantService.isEnabled() && ((AuthenticationUtil.getRunAsUser() == null)
                    || (AuthenticationUtil.isRunAsUserTheSystemUser()))) {
                directoryEntry = AuthenticationUtil.runAs(new RunAsWork<Document>() {
                    public Document doWork() {
                        return regenerateDocumentPath(pathPair);
                    }
                }, tenantService.getDomainUser(AuthenticationUtil.getSystemUserName(),
                        tenantService.getDomain(this.store.getIdentifier())));
            } else {
                directoryEntry = regenerateDocumentPath(pathPair);
            }

            try {
                writer.addDocument(directoryEntry);
            } catch (IOException e) {
                throw new LuceneIndexException("Failed to add document to index", e);
            }
        }

        commandList.clear();
        this.docs = writer.docCount();
    } catch (IOException e) {
        // If anything goes wrong we try and do a roll back
        throw new LuceneIndexException("Failed to flush index", e);
    } finally {
        if (mainReader != null) {
            try {
                mainReader.close();
            } catch (IOException e) {
                throw new LuceneIndexException("Filed to close main reader", e);
            }
        }
        // Make sure deletes are sent
        try {
            closeDeltaReader();
        } catch (IOException e) {
            // Silently ignore
        }
        // Make sure writes and updates are sent.
        try {
            closeDeltaWriter();
        } catch (IOException e) {
            // Silently ignore
        }
    }
}