Example usage for org.apache.lucene.index IndexReader close

Introduction

This page shows example usages of org.apache.lucene.index IndexReader.close().

Prototype

@Override
public final synchronized void close() throws IOException 

Document

Closes files associated with this index.
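
Because IndexReader implements java.io.Closeable, the reader can be closed explicitly or with try-with-resources. The sketch below is a minimal, hypothetical example (it assumes a Lucene 4.x-style API and a local "index" directory, matching the first usage example on this page), not code taken from one of the projects listed here.

import java.io.File;
import java.io.IOException;

import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.store.FSDirectory;

public class CloseReaderSketch {
    public static void main(String[] args) throws IOException {
        // Open the index directory; "index" is a placeholder path.
        FSDirectory directory = FSDirectory.open(new File("index"));

        // try-with-resources calls reader.close() even if searching throws,
        // releasing the files associated with the index.
        try (IndexReader reader = DirectoryReader.open(directory)) {
            IndexSearcher searcher = new IndexSearcher(reader);
            System.out.println("Index contains " + reader.numDocs() + " documents");
            // ... run queries with the searcher here ...
        }

        directory.close();
    }
}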

Usage

From source file:edu.ucdenver.ccp.nlp.index.Search.java

License:Apache License

/** Simple command-line based search demo. */
public static void main(String[] args) throws Exception {

    String index = "index";

    String queries = null;

    String queryString = null;
    int hitsPerPage = 100;

    IndexReader reader = DirectoryReader.open(FSDirectory.open(new File(index)));
    IndexSearcher searcher = new IndexSearcher(reader);
    //Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_40);
    EnglishAnalyzer analyzer = new EnglishAnalyzer(Version.LUCENE_40);

    BufferedReader in = new BufferedReader(new InputStreamReader(System.in, "UTF-8"));
    //query building starts here.
    //QueryParser parser = new QueryParser(Version.LUCENE_40, "title", analyzer);
    MultiFieldQueryParser parser = new MultiFieldQueryParser(Version.LUCENE_40,
            new String[] { "title", "abs", "mentions" }, analyzer);

    while (true) {
        if (queries == null && queryString == null) { // prompt the user
            //c for cisplatin

            System.out.println("Enter query: ");
        }

        String line = queryString != null ? queryString : in.readLine();

        if (line == null) { // end of input
            break;
        }

        line = line.trim();
        if (line.length() == 0) {
            break;
        }

        //Query q = queryParser.parse(querystr);
        Query query = parser.parse(line);
        //System.out.println("Searching for: " + query.toString(field));

        TopScoreDocCollector collector = TopScoreDocCollector.create(hitsPerPage, true);
        searcher.search(query, collector);
        ScoreDoc[] hits = collector.topDocs().scoreDocs;
        // 4. display results
        System.out.println("Found " + hits.length + " hits.");

        for (int i = 0; i < hits.length; ++i) {
            int docId = hits[i].doc;
            Document d = searcher.doc(docId);
            System.out.println((i + 1) + ". " + d.get("pmid") + "\t" + d.get("title"));
        }

        //doPagingSearch(in, searcher, query, hitsPerPage, raw, queries == null && queryString == null);

        if (queryString != null) {
            break;
        }
    }
    reader.close();
}
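
In the example above, reader.close() runs only if the loop exits normally; a parse or I/O failure would leave the index files open. A minimal variation (a sketch reusing the variables and imports from the example above, not part of the original source) closes the reader in a finally block:

    IndexReader reader = DirectoryReader.open(FSDirectory.open(new File(index)));
    try {
        IndexSearcher searcher = new IndexSearcher(reader);
        // ... build the analyzer/parser and run the query loop as above ...
    } finally {
        // Always release the index files, even if parsing or searching fails.
        reader.close();
    }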

From source file:edu.uci.isr.archstudio4.comp.tracelink.addtracelinks.RecoverLinkView.java

private void searchFiles(String searchTerm) {

    String index = "index";
    String field = "contents";
    //String queries = null;         //search term
    int repeat = 0;
    boolean raw = false;
    String normsField = null;

    try {

        IndexReader reader = IndexReader.open(index);
        if (normsField != null)
            reader = new OneNormsReader(reader, normsField);
        Searcher searcher = new IndexSearcher(reader);
        Analyzer analyzer = new StandardAnalyzer();

        QueryParser parser = new QueryParser(field, analyzer);
        Query query = parser.parse(searchTerm);
        System.out.println("Searching for: " + query.toString(field));

        Hits hits = searcher.search(query);

        System.out.println(hits.length() + " total matching documents");

        //H: added 
        TraceLink link = new TraceLink();
        link.setDescription("Recovered links");
        link.setRelationship("unknown");
        TraceEndpoint te = new TraceEndpoint();
        te.setCaptureMode("recovered");
        te.setAuthor(System.getProperty("user.name"));
        te.setLocationType("simple");
        //te.setLocationHref(tracelinkController.getEndpointHref());
        te.setLocationHref(selectedEndpointHref);
        Date timestamp = new Date();
        te.setTimestamp(timestamp);
        link.addEndpoint(te);

        String endpointPaths = "";
        for (int i = 0; i < hits.length(); i++) {

            if (raw) { // output raw format
                System.out.println("doc=" + hits.id(i) + " score=" + hits.score(i));
                continue;
            }

            Document doc = hits.doc(i);
            String path = doc.get("path");

            //H: added
            te = new TraceEndpoint();
            te.setCaptureMode("recovered");
            te.setAuthor(System.getProperty("user.name"));
            te.setLocationType("simple");
            te.setLocationHref(path);
            endpointPaths += path + "\n";
            timestamp = new Date();
            te.setTimestamp(timestamp);
            link.addEndpoint(te);

            if (path != null) {
                System.out.println((i + 1) + ". " + path);
                String title = doc.get("title");
                if (title != null) {
                    System.out.println("   Title: " + doc.get("title"));
                }
            } else {
                System.out.println((i + 1) + ". " + "No path for this document");
            }

        }

        String msgBoxTitle = "Recover Tracelinks";
        if (hits.length() > 0) {
            MsgBox mbox = new MsgBox(shell, SWT.YES); //for yes/no msgbox
            System.out.println("paths: " + endpointPaths);
            int result = mbox.displayMsgBox("Add the following recovered links?" + "\n" + endpointPaths,
                    msgBoxTitle);
            if (result == SWT.YES) {
                //tracelinkController.addTraceLinks(link);
                xadlFacade.addTraceLinks(link);
                //tracelinkController.updateViews();
            }

        } else { //handle no results
            MsgBox mbox = new MsgBox(shell, SWT.OK);
            mbox.displayMsgBox("No recovered links", msgBoxTitle);
        }
        reader.close();

    } catch (IOException exc) {
        exc.printStackTrace();
    } catch (ParseException pexc) {
        pexc.printStackTrace();
    }
}

From source file:edu.ur.ir.groupspace.service.DefaultGroupWorkspaceSearchService.java

License:Apache License

public SearchResults<GroupWorkspace> search(File indexFolder, String query, int offset, int numResults) {
    SearchResults<GroupWorkspace> searchResults = new SearchResults<GroupWorkspace>();
    searchResults.setOriginalQuery(query);
    query = SearchHelper.prepareMainSearchString(query, true);
    ArrayList<GroupWorkspace> groupWorkspaces = new ArrayList<GroupWorkspace>();
    if (log.isDebugEnabled()) {
        log.debug(
                "User search results executing query " + query + " on index " + indexFolder.getAbsolutePath());
    }

    IndexSearcher searcher = null;
    IndexReader reader = null;
    try {
        FSDirectory directory = FSDirectory.open(indexFolder);
        reader = IndexReader.open(directory, true);
        searcher = new IndexSearcher(reader);
        QueryParser parser = new MultiFieldQueryParser(Version.LUCENE_35, fields, analyzer);
        parser.setDefaultOperator(QueryParser.AND_OPERATOR);

        Query luceneQuery = parser.parse(query);
        TopDocs hits = searcher.search(luceneQuery, 1000);
        searchResults.setTotalHits(hits.totalHits);

        int position = offset;
        int addedResults = 0;
        while (hits.totalHits > position && (addedResults < numResults)) {
            if (log.isDebugEnabled()) {
                log.debug(" adding document at position " + position);

            }

            Document d = searcher.doc(hits.scoreDocs[position].doc);
            Long groupWorkspaceId = NumericUtils.prefixCodedToLong(d.get(DefaultGroupWorkspaceIndexService.ID));
            log.debug("group workspace id = " + groupWorkspaceId);
            GroupWorkspace groupWorkspace = groupWorkspaceService.get(groupWorkspaceId, false);
            groupWorkspaces.add(groupWorkspace);
            addedResults += 1;
            position += 1;
        }
    } catch (Exception e) {
        log.error(e);
    } finally {
        if (searcher != null) {
            try {
                searcher.close();
            } catch (IOException e) {
                log.error("the searcher could not be closed", e);
            }
        }
        if (reader != null) {
            try {
                reader.close();
            } catch (IOException e) {
                log.error("the reader could not be closed", e);
            }
        }
    }
    searchResults.setObjects(groupWorkspaces);
    return searchResults;
}

From source file:edu.ur.ir.institution.service.DefaultInstitutionalCollectionSearchService.java

License:Apache License

/**
 * Returns search results for institutional collections.
 *
 * @param institutionalCollectionIndexFolder - folder for the institutional collections
 * @param query - query to execute
 * @param offset - offset to start at
 * @param numResults - number of results.
 * 
 * @return - set of institutional collections found for the query.
 */
public SearchResults<InstitutionalCollection> search(File institutionalCollectionIndexFolder, String query,
        int offset, int numResults) {
    SearchResults<InstitutionalCollection> searchResults = new SearchResults<InstitutionalCollection>();
    searchResults.setOriginalQuery(query);
    query = SearchHelper.prepareMainSearchString(query, true);
    ArrayList<InstitutionalCollection> collections = new ArrayList<InstitutionalCollection>();
    if (log.isDebugEnabled()) {
        log.debug("User search results executing query " + query + " on index "
                + institutionalCollectionIndexFolder.getAbsolutePath());
    }

    IndexSearcher searcher = null;
    IndexReader reader = null;
    try {
        FSDirectory directory = FSDirectory.open(institutionalCollectionIndexFolder);
        reader = IndexReader.open(directory, true);
        searcher = new IndexSearcher(reader);

        QueryParser parser = new MultiFieldQueryParser(Version.LUCENE_35, fields, analyzer);
        parser.setDefaultOperator(QueryParser.AND_OPERATOR);

        Query luceneQuery = parser.parse(query);
        TopDocs hits = searcher.search(luceneQuery, 1000);

        searchResults.setTotalHits(hits.totalHits);

        int position = offset;
        int addedResults = 0;
        while (hits.totalHits > position && (addedResults < numResults)) {
            if (log.isDebugEnabled()) {
                log.debug(" adding document at position " + position);

            }

            Document d = searcher.doc(hits.scoreDocs[position].doc);

            Long collectionId = NumericUtils
                    .prefixCodedToLong(d.get(DefaultInstitutionalCollectionIndexService.ID));
            if (log.isDebugEnabled()) {
                log.debug("collection id = " + collectionId);
            }

            InstitutionalCollection collection = institutionalCollectionService.getCollection(collectionId,
                    false);
            collections.add(collection);
            addedResults += 1;
            position += 1;
        }
    } catch (Exception e) {
        log.error(e);
    } finally {
        if (searcher != null) {
            try {
                searcher.close();
            } catch (IOException e) {
                log.error("the searcher could not be closed", e);
            }
        }
        if (reader != null) {
            try {
                reader.close();
            } catch (IOException e) {
                log.error("the reader could not be closed", e);
            }
        }
    }
    searchResults.setObjects(collections);
    return searchResults;
}

From source file:edu.ur.ir.institution.service.DefaultInstitutionalItemIndexServiceTest.java

License:Apache License

/**
 * Executes the query returning the number of hits.
 *
 * @param field - field to search on
 * @param queryString - query string
 * @param dir - lucene index to search
 * 
 * @return - number of hits
 * 
 * @throws CorruptIndexException
 * @throws IOException
 * @throws ParseException
 */
private int executeQuery(String field, String queryString, Directory dir)
        throws CorruptIndexException, IOException, ParseException {

    IndexReader reader = IndexReader.open(dir, true);
    IndexSearcher searcher = new IndexSearcher(reader);
    QueryParser parser = new QueryParser(Version.LUCENE_35, field, new StandardAnalyzer(Version.LUCENE_35));
    Query q1 = parser.parse(queryString);
    TopDocs hits = searcher.search(q1, 1000);
    int hitCount = hits.totalHits;

    searcher.close();
    reader.close();

    return hitCount;
}

From source file:edu.ur.ir.institution.service.DefaultInstitutionalItemSearchService.java

License:Apache License

/**
 * Get the facets and results
 * @see edu.ur.ir.institution.InstitutionalItemSearchService#executeSearchWithFacets(java.lang.String, java.lang.String, int, int, int, int)
 */
public FacetSearchHelper executeSearchWithFacets(String mainQueryString, String indexFolder,
        int numberOfHitsToProcessForFacets, int numberOfResultsToCollectForFacets, int numberOfFactsToShow,
        int numberOfIdsToCollect, int idsToCollectStartPosition)
        throws CorruptIndexException, IOException, ParseException {
    log.debug("orginal query 4 = " + mainQueryString);
    if (searchDirectoryIsEmpty(indexFolder) || isInvalidQuery(mainQueryString)) {
        return new FacetSearchHelper(new HashSet<Long>(), 0, new HashMap<String, Collection<FacetResult>>(),
                mainQueryString);
    }

    FSDirectory directory = FSDirectory.open(new File(indexFolder));
    IndexReader reader = IndexReader.open(directory, true);
    IndexSearcher searcher = new IndexSearcher(reader);

    QueryParser parser = new MultiFieldQueryParser(Version.LUCENE_35, fields, analyzer, getBoostedFields());
    parser.setDefaultOperator(QueryParser.AND_OPERATOR);

    // execute the main query - we will use this to extract data to determine the facet searches
    // the search helper MUST BE SET TO FALSE if diacritic based searches are to work
    // putting a * following a diacritic does not work
    String executedQuery = SearchHelper.prepareMainSearchString(mainQueryString, false);
    Query mainQuery = parser.parse(executedQuery);
    if (log.isDebugEnabled()) {
        log.debug("main query = " + executedQuery);
        log.debug(
                "main query parsed = " + mainQuery + " maxNumberOfMainQueryHits = " + maxNumberOfMainQueryHits);
    }

    TopDocs topDocs = searcher.search(mainQuery, maxNumberOfMainQueryHits);

    // determine the set of data we should use to determine facets
    HashMap<String, HashMap<String, FacetResult>> possibleFacets = this.generateFacetSearches(topDocs,
            numberOfHitsToProcessForFacets, numberOfResultsToCollectForFacets, searcher);

    QueryWrapperFilter mainQueryWrapper = new QueryWrapperFilter(mainQuery);
    log.debug("executeSearchWithFacets 1 query = " + mainQuery);
    DocIdSet mainQueryBits = mainQueryWrapper.getDocIdSet(reader);
    OpenBitSetDISI mainQueryBitSet = new OpenBitSetDISI(mainQueryBits.iterator(), reader.maxDoc());

    HashMap<String, Collection<FacetResult>> facetResults = new HashMap<String, Collection<FacetResult>>();

    // process the data and determine the facets
    FacetSearchHelper helper = processPossibleFacets(possibleFacets, reader, mainQueryBitSet, facetResults,
            topDocs, numberOfIdsToCollect, idsToCollectStartPosition, numberOfFactsToShow, mainQueryString,
            searcher);
    helper.setExecutedQuery(executedQuery);
    searcher.close();
    reader.close();
    return helper;
}

From source file:edu.ur.ir.institution.service.DefaultInstitutionalItemSearchService.java

License:Apache License

/**
 * Execute the search with a set of facet filters
 *
 * @see edu.ur.ir.repository.InstitutionalItemSearchService#executeSearchWithFacets(java.lang.String, java.util.Set, java.lang.String, int, int, int)
 */
public FacetSearchHelper executeSearchWithFacets(String mainQueryString, List<FacetFilter> filters,
        String indexFolder, int numberOfHitsToProcessForFacets, int numberOfResultsToCollectForFacets,
        int numberOfFactsToShow, int numberOfIdsToCollect, int idsToCollectStartPosition)
        throws CorruptIndexException, IOException, ParseException {

    log.debug("orignal query 3 = " + mainQueryString);

    // return if the main query is invalid
    if (searchDirectoryIsEmpty(indexFolder) || isInvalidQuery(mainQueryString)) {
        return new FacetSearchHelper(new HashSet<Long>(), 0, new HashMap<String, Collection<FacetResult>>(),
                mainQueryString);
    }

    FSDirectory directory = FSDirectory.open(new File(indexFolder));
    IndexReader reader = IndexReader.open(directory, true);
    IndexSearcher searcher = new IndexSearcher(reader);

    QueryParser parser = new MultiFieldQueryParser(Version.LUCENE_35, fields, analyzer, getBoostedFields());
    parser.setDefaultOperator(QueryParser.AND_OPERATOR);

    // execute the main query - we will use this to extract data to determine the facet searches
    // the search helper MUST BE SET TO FALSE if diacritic based searches are to work
    // putting a * following a diacritic does not work
    String executedQuery = SearchHelper.prepareMainSearchString(mainQueryString, false);

    if (log.isDebugEnabled()) {
        log.debug("parsed query = " + executedQuery.trim());
    }
    Query mainQuery = parser.parse(executedQuery);

    //create a filter for the main query
    QueryWrapperFilter mainQueryWrapper = new QueryWrapperFilter(mainQuery);

    // get the bitset for main query
    DocIdSet mainQueryBits = mainQueryWrapper.getDocIdSet(reader);
    OpenBitSetDISI mainQueryBitSet = new OpenBitSetDISI(mainQueryBits.iterator(), reader.maxDoc());
    TopDocs hits = null;
    if (filters.size() > 0) {
        // create a filter that will match the main query plus all other filters
        List<Filter> luceneFilters = getSubQueryFilters(filters, searcher);
        Filter filter = new ChainedFilter(luceneFilters.toArray(new Filter[luceneFilters.size()]),
                ChainedFilter.AND);
        if (log.isDebugEnabled()) {
            log.debug("filter = " + filter);
        }

        // apply the facets and include them in the main query bit set
        DocIdSet filterQueryBits = filter.getDocIdSet(reader);

        OpenBitSetDISI filterBitSet = new OpenBitSetDISI(filterQueryBits.iterator(), reader.maxDoc());
        mainQueryBitSet.and(filterBitSet);

        hits = searcher.search(mainQuery, filter, maxNumberOfMainQueryHits);
        log.debug(" executeSearchWithFacets 2 = mainQuery = " + executedQuery + " filter = " + filter);
    } else {
        hits = searcher.search(mainQuery, maxNumberOfMainQueryHits);
        log.debug(" executeSearchWithFacets 3 = mainQuery = " + mainQuery);

    }

    // determine the set of data we should use to determine facets
    HashMap<String, HashMap<String, FacetResult>> possibleFacets = this.generateFacetSearches(hits,
            numberOfHitsToProcessForFacets, numberOfResultsToCollectForFacets, searcher);

    HashMap<String, Collection<FacetResult>> facetResults = new HashMap<String, Collection<FacetResult>>();
    FacetSearchHelper helper = processPossibleFacets(possibleFacets, reader, mainQueryBitSet, facetResults,
            hits, numberOfIdsToCollect, idsToCollectStartPosition, numberOfFactsToShow, mainQueryString,
            searcher);

    helper.setExecutedQuery(executedQuery);
    helper.setFacetTrail(filters);

    searcher.close();
    reader.close();
    return helper;
}

From source file:edu.ur.ir.institution.service.DefaultInstitutionalItemSearchService.java

License:Apache License

public FacetSearchHelper executeSearchWithFacets(String mainQueryString, List<FacetFilter> filters,
        String indexFolder, int numberOfHitsToProcessForFacets, int numberOfResultsToCollectForFacets,
        int numberOfFactsToShow, int numberOfIdsToCollect, int idsToCollectStartPosition,
        InstitutionalCollection collection) throws CorruptIndexException, IOException, ParseException {

    log.debug("orginal query 2 = " + mainQueryString);
    if (searchDirectoryIsEmpty(indexFolder) || isInvalidQuery(mainQueryString)) {
        return new FacetSearchHelper(new HashSet<Long>(), 0, new HashMap<String, Collection<FacetResult>>(),
                mainQueryString);
    }

    FSDirectory directory = FSDirectory.open(new File(indexFolder));
    IndexReader reader = IndexReader.open(directory, true);
    IndexSearcher searcher = new IndexSearcher(reader);

    QueryParser parser = new MultiFieldQueryParser(Version.LUCENE_35, fields, analyzer, getBoostedFields());
    parser.setDefaultOperator(QueryParser.AND_OPERATOR);

    // execute the main query - we will use this to extract data to determine the facet searches
    // the search helper MUST BE SET TO FALSE if diacritic based searches are to work
    // putting a * following a diacritic does not work
    String executedQuery = SearchHelper.prepareMainSearchString(mainQueryString, false);
    Query mainQuery = parser.parse(executedQuery);

    if (log.isDebugEnabled()) {
        log.debug("parsed query = " + executedQuery);
    }
    //create a filter for the main query
    QueryWrapperFilter mainQueryWrapper = new QueryWrapperFilter(mainQuery);

    // get the bitset for main query
    DocIdSet mainQueryBits = mainQueryWrapper.getDocIdSet(reader);

    List<Filter> luceneFilters = new LinkedList<Filter>();

    if (filters.size() > 0) {
        // create a filter that will match the main query plus all other filters
        luceneFilters.addAll(getSubQueryFilters(filters, searcher));
    }
    // add filters for the collection first
    luceneFilters.addAll(0, getCollectionFilters(collection));

    Filter filter = new ChainedFilter(luceneFilters.toArray(new Filter[luceneFilters.size()]),
            ChainedFilter.AND);

    if (log.isDebugEnabled()) {
        log.debug("filter = " + filter);
    }

    // get the filter query doc id set
    DocIdSet filterQueryBits = filter.getDocIdSet(reader);

    // apply the facets and include them in the main query bit set
    OpenBitSetDISI mainQueryBitSet = new OpenBitSetDISI(mainQueryBits.iterator(), reader.maxDoc());
    OpenBitSetDISI filterBitSet = new OpenBitSetDISI(filterQueryBits.iterator(), reader.maxDoc());
    mainQueryBitSet.and(filterBitSet);

    TopDocs hits = searcher.search(mainQuery, filter, maxNumberOfMainQueryHits);
    log.debug(" executeSearchWithFacets 4 = mainQuery = " + mainQuery + " filter = " + filter
            + "maxNumberOfMainQueryHits = " + maxNumberOfMainQueryHits);

    // determine the set of data we should use to determine facets
    HashMap<String, HashMap<String, FacetResult>> possibleFacets = this.generateFacetSearches(hits,
            numberOfHitsToProcessForFacets, numberOfResultsToCollectForFacets, searcher);

    HashMap<String, Collection<FacetResult>> facetResults = new HashMap<String, Collection<FacetResult>>();
    FacetSearchHelper helper = processPossibleFacets(possibleFacets, reader, mainQueryBitSet, facetResults,
            hits, numberOfIdsToCollect, idsToCollectStartPosition, numberOfFactsToShow, mainQueryString,
            searcher);

    helper.setFacetTrail(filters);
    helper.setExecutedQuery(executedQuery);
    searcher.close();
    reader.close();
    return helper;
}

From source file:edu.ur.ir.institution.service.DefaultInstitutionalItemSearchService.java

License:Apache License

public FacetSearchHelper executeSearchWithFacets(String mainQueryString, String indexFolder,
        int numberOfHitsToProcessForFacets, int numberOfResultsToCollectForFacets, int numberOfFactsToShow,
        int numberOfIdsToCollect, int idsToCollectStartPosition, InstitutionalCollection collection)
        throws CorruptIndexException, IOException, ParseException {
    log.debug("orginal query 1= " + mainQueryString);
    log.debug("execute search with facets for a collection");
    if (searchDirectoryIsEmpty(indexFolder) || isInvalidQuery(mainQueryString)) {
        log.debug("problem with search!");
        return new FacetSearchHelper(new HashSet<Long>(), 0, new HashMap<String, Collection<FacetResult>>(),
                mainQueryString);
    }

    FSDirectory directory = FSDirectory.open(new File(indexFolder));
    IndexReader reader = IndexReader.open(directory, true);
    IndexSearcher searcher = new IndexSearcher(reader);

    QueryParser parser = new MultiFieldQueryParser(Version.LUCENE_35, fields, analyzer, getBoostedFields());
    parser.setDefaultOperator(QueryParser.AND_OPERATOR);

    // execute the main query - we will use this to extract data to determine the facet searches
    // the search helper MUST BE SET TO FALSE if diacritic based searches are to work
    // putting a * following a diacritic does not work
    String executedQuery = SearchHelper.prepareMainSearchString(mainQueryString, false);
    Query mainQuery = parser.parse(executedQuery);

    if (log.isDebugEnabled()) {
        log.debug("Executed query = " + executedQuery);
    }

    Filter[] aFilters = this.getCollectionFilters(collection).toArray(new Filter[2]);

    Filter chainedFilter = new ChainedFilter(aFilters, ChainedFilter.AND);

    //create a filter for the main query
    QueryWrapperFilter mainQueryWrapper = new QueryWrapperFilter(mainQuery);

    // get the bitset for main query
    DocIdSet mainQueryBits = mainQueryWrapper.getDocIdSet(reader);

    // get the filter query doc id set
    DocIdSet filterQueryBits = chainedFilter.getDocIdSet(reader);

    // apply the filters for the collection root and range
    OpenBitSetDISI mainQueryBitSet = new OpenBitSetDISI(mainQueryBits.iterator(), reader.maxDoc());
    OpenBitSetDISI filterBitSet = new OpenBitSetDISI(filterQueryBits.iterator(), reader.maxDoc());
    mainQueryBitSet.and(filterBitSet);

    log.debug(" executeSearchWithFacets 5 = mainQuery = " + mainQuery + " filter = " + chainedFilter);
    TopDocs hits = searcher.search(mainQuery, chainedFilter, maxNumberOfMainQueryHits);

    // determine the set of data we should use to determine facets
    HashMap<String, HashMap<String, FacetResult>> possibleFacets = this.generateFacetSearches(hits,
            numberOfHitsToProcessForFacets, numberOfResultsToCollectForFacets, searcher);

    HashMap<String, Collection<FacetResult>> facetResults = new HashMap<String, Collection<FacetResult>>();
    // process the data and determine the facets
    FacetSearchHelper helper = processPossibleFacets(possibleFacets, reader, mainQueryBitSet, facetResults,
            hits, numberOfIdsToCollect, idsToCollectStartPosition, numberOfFactsToShow, mainQueryString,
            searcher);

    helper.setExecutedQuery(executedQuery);
    searcher.close();
    reader.close();
    return helper;
}

From source file:edu.ur.ir.person.service.DefaultNameAuthorityIndexServiceTest.java

License:Apache License

/**
 * Executes the query returning the number of hits.
 *
 * @param field - field to search on
 * @param queryString - query string
 * @param dir - lucene index to search
 * 
 * @return - number of hits
 * 
 * @throws CorruptIndexException
 * @throws IOException
 * @throws ParseException
 */
private int executeQuery(String field, String queryString, Directory dir)
        throws CorruptIndexException, IOException, ParseException {
    IndexReader reader = IndexReader.open(dir, true);
    IndexSearcher searcher = new IndexSearcher(reader);
    QueryParser parser = new QueryParser(Version.LUCENE_35, field, new StandardAnalyzer(Version.LUCENE_35));
    Query q1 = parser.parse(queryString);
    TopDocs hits = searcher.search(q1, 1000);
    int hitCount = hits.totalHits;

    searcher.close();
    reader.close();

    return hitCount;
}