Example usage for org.apache.lucene.search IndexSearcher doc


Introduction

This page collects example usages of org.apache.lucene.search.IndexSearcher.doc(int docID), drawn from open-source projects.

Prototype

public Document doc(int docID) throws IOException 

Document

Sugar for .getIndexReader().document(docID): returns the stored fields of the document with the given internal document ID.
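The examples below all follow the same basic pattern: run a search, take the int docID from each ScoreDoc hit, and pass it to searcher.doc(docID) to retrieve that document's stored fields. The following minimal sketch shows the pattern in isolation (Lucene 4.x style, matching the examples); the class name DocExample, the "index" directory, and the "contents"/"title" field names are illustrative assumptions, not taken from any of the projects listed here.

import java.io.File;
import java.io.IOException;

import org.apache.lucene.document.Document;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.FSDirectory;

public class DocExample {
    public static void main(String[] args) throws IOException {
        // "index" and the "contents"/"title" field names are placeholders
        IndexReader reader = DirectoryReader.open(FSDirectory.open(new File("index")));
        IndexSearcher searcher = new IndexSearcher(reader);

        Query query = new TermQuery(new Term("contents", "lucene"));
        TopDocs topDocs = searcher.search(query, 10);

        for (ScoreDoc hit : topDocs.scoreDocs) {
            // doc(int) resolves the internal docID to the document's stored fields
            Document doc = searcher.doc(hit.doc);
            System.out.println(hit.score + "\t" + doc.get("title"));
        }
        reader.close();
    }
}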

Usage

From source file:edu.rpi.tw.linkipedia.search.utils.Utils.java

License:Open Source License

public static Document readIndexByTerm(IndexSearcher searcher, String fieldString, String termString)
        throws CorruptIndexException, IOException {
    TermQuery query = new TermQuery(new Term(fieldString, termString));
    //System.out.println(query.toString());
    TopDocs topDocs = searcher.search(query, 1);
    ScoreDoc[] hits = topDocs.scoreDocs;
    if (hits.length > 0) {
        int docId = hits[0].doc;
        return searcher.doc(docId);
    }

    /*
    Term term = new Term(fieldString, termString);
    TermDocs docs = reader.termDocs(term);
    if(docs.next()){   
       int docId = docs.doc();
       Document doc = reader.document(docId);
       return doc;
    }
    */
    return null;
}
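A hypothetical call to the helper above, assuming an already open IndexSearcher named searcher; the "label" field and the search term are placeholders, not taken from the project:

// Hypothetical usage; "label" and "lucene" are placeholder field/term values
Document doc = Utils.readIndexByTerm(searcher, "label", "lucene");
if (doc != null) {
    System.out.println(doc.get("label"));
}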

From source file:edu.siena.cs.sawus.search.Searcher.java

License:Apache License

/**
 * This demonstrates a typical paging search scenario, where the search engine presents 
 * pages of size n to the user. The user can then go to the next page if interested in
 * the next hits.
 * 
 * When the query is executed for the first time, then only enough results are collected
 * to fill 5 result pages. If the user wants to page beyond this limit, then the query
 * is executed another time and all hits are collected.
 * 
 */
public static void doPagingSearch(BufferedReader in, IndexSearcher searcher, Query query, int hitsPerPage,
        boolean raw, boolean interactive) throws IOException {

    // Collect enough docs to show 5 pages
    TopDocs results = searcher.search(query, 5 * hitsPerPage);
    ScoreDoc[] hits = results.scoreDocs;

    int numTotalHits = results.totalHits;
    System.out.println(numTotalHits + " total matching documents");

    int start = 0;
    int end = numTotalHits;

    // Collect every hit; skip the search when there are none (searching for 0 hits is not allowed)
    if (numTotalHits > 0) {
        hits = searcher.search(query, numTotalHits).scoreDocs;
    }

    for (int i = start; i < end; i++) {

        Document doc = searcher.doc(hits[i].doc);
        String path = doc.get("file");
        if (path != null) {
            System.out.println((i + 1) + ". " + path);
            String docID = doc.get("doc_id");
            if (docID != null) {
                System.out.println("DocID:\t" + docID);
            }
            String title = doc.get("title");
            if (title != null) {
                System.out.println("Title:\t" + doc.get("title").replaceAll("\n", " "));
            }
            String content = doc.get("contents");
            if (content != null) {
                System.out.println(content);
            }
        } else {
            System.out.println((i + 1) + ". " + "No path for this document");
        }

    }
}
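A hypothetical driver for the method above (imports omitted); the index directory, analyzer, and default "contents" field are assumptions, not taken from Searcher.java:

public static void main(String[] args) throws Exception {
    // Assumed index location and analyzer; adjust to the actual index
    IndexReader reader = DirectoryReader.open(FSDirectory.open(new File("index")));
    IndexSearcher searcher = new IndexSearcher(reader);
    BufferedReader in = new BufferedReader(new InputStreamReader(System.in, "UTF-8"));

    QueryParser parser = new QueryParser(Version.LUCENE_40, "contents", new StandardAnalyzer(Version.LUCENE_40));
    Query query = parser.parse("lucene");

    doPagingSearch(in, searcher, query, 10, false, true);
    reader.close();
}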

From source file:edu.ucdenver.ccp.nlp.index.Search.java

License:Apache License

/** Simple command-line based search demo. */
public static void main(String[] args) throws Exception {

    String index = "index";

    String queries = null;

    String queryString = null;
    int hitsPerPage = 100;

    IndexReader reader = DirectoryReader.open(FSDirectory.open(new File(index)));
    IndexSearcher searcher = new IndexSearcher(reader);
    //Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_40);
    EnglishAnalyzer analyzer = new EnglishAnalyzer(Version.LUCENE_40);

    BufferedReader in = null;
    in = new BufferedReader(new InputStreamReader(System.in, "UTF-8"));
    //query building starts here.
    //QueryParser parser = new QueryParser(Version.LUCENE_40, "title", analyzer);
    MultiFieldQueryParser parser = new MultiFieldQueryParser(Version.LUCENE_40,
            new String[] { "title", "abs", "mentions" }, analyzer);

    while (true) {
        if (queries == null && queryString == null) { // prompt the user
            //c for cisplatin

            System.out.println("Enter query: ");
        }

        String line = queryString != null ? queryString : in.readLine();

        if (line == null) {
            break;
        }

        line = line.trim();
        if (line.length() == 0) {
            break;
        }

        //Query q = queryParser.parse(querystr);
        Query query = parser.parse(line);
        //System.out.println("Searching for: " + query.toString(field));

        TopScoreDocCollector collector = TopScoreDocCollector.create(hitsPerPage, true);
        searcher.search(query, collector);
        ScoreDoc[] hits = collector.topDocs().scoreDocs;
        // 4. display results
        System.out.println("Found " + hits.length + " hits.");

        for (int i = 0; i < hits.length; ++i) {
            int docId = hits[i].doc;
            Document d = searcher.doc(docId);
            System.out.println((i + 1) + ". " + d.get("pmid") + "\t" + d.get("title"));
        }

        //doPagingSearch(in, searcher, query, hitsPerPage, raw, queries == null && queryString == null);

        if (queryString != null) {
            break;
        }
    }
    reader.close();
}

From source file:edu.ucdenver.ccp.nlp.index.Search.java

License:Apache License

/**
 * @param in
 * @param searcher
 * @param query
 * @param hitsPerPage
 * @param raw
 * @param interactive
 * @throws IOException
 */
public static void doPagingSearch(BufferedReader in, IndexSearcher searcher, Query query, int hitsPerPage,
        boolean raw, boolean interactive) throws IOException {

    // Collect enough docs to show 5 pages
    TopDocs results = searcher.search(query, 5 * hitsPerPage);
    ScoreDoc[] hits = results.scoreDocs;

    int numTotalHits = results.totalHits;
    System.out.println(numTotalHits + " total matching documents");

    int start = 0;
    int end = Math.min(numTotalHits, hitsPerPage);

    while (true) {
        if (end > hits.length) {
            System.out.println("Only results 1 - " + hits.length + " of " + numTotalHits
                    + " total matching documents collected.");
            System.out.println("Collect more (y/n) ?");
            String line = in.readLine();
            if (line.length() == 0 || line.charAt(0) == 'n') {
                break;
            }

            hits = searcher.search(query, numTotalHits).scoreDocs;
        }

        end = Math.min(hits.length, start + hitsPerPage);

        for (int i = start; i < end; i++) {
            if (raw) { // output raw format
                System.out.println("doc=" + hits[i].doc + " score=" + hits[i].score);
                continue;
            }

            Document doc = searcher.doc(hits[i].doc);
            //String path = doc.get("path");
            //if (path != null) {
            System.out.print((i + 1));
            String title = doc.get("title");
            String pmid = doc.get("pmid");

            if (title != null || pmid != null) {
                System.out.println(doc.get("pmid") + " " + doc.get("title"));
            }
            //}
            else {
                System.out.println((i + 1) + ". " + "No path for this document");
            }

        }

        if (!interactive || end == 0) {
            break;
        }

        if (numTotalHits >= end) {
            boolean quit = false;
            while (true) {
                System.out.print("Press ");
                if (start - hitsPerPage >= 0) {
                    System.out.print("(p)revious page, ");
                }
                if (start + hitsPerPage < numTotalHits) {
                    System.out.print("(n)ext page, ");
                }
                System.out.println("(q)uit or enter number to jump to a page.");

                String line = in.readLine();
                if (line.length() == 0 || line.charAt(0) == 'q') {
                    quit = true;
                    break;
                }
                if (line.charAt(0) == 'p') {
                    start = Math.max(0, start - hitsPerPage);
                    break;
                } else if (line.charAt(0) == 'n') {
                    if (start + hitsPerPage < numTotalHits) {
                        start += hitsPerPage;
                    }
                    break;
                } else {
                    int page = Integer.parseInt(line);
                    if ((page - 1) * hitsPerPage < numTotalHits) {
                        start = (page - 1) * hitsPerPage;
                        break;
                    } else {
                        System.out.println("No such page");
                    }
                }
            }
            if (quit)
                break;
            end = Math.min(numTotalHits, start + hitsPerPage);
        }
    }
}

From source file:edu.uci.ics.cs221wiki.SearchFiles.java

License:Apache License

/**
 * This demonstrates a typical paging search scenario, where the search engine presents 
 * pages of size n to the user. The user can then go to the next page if interested in
 * the next hits.
 * 
 * When the query is executed for the first time, then only enough results are collected
 * to fill 5 result pages. If the user wants to page beyond this limit, then the query
 * is executed another time and all hits are collected.
 * 
 */
public static void doPagingSearch(BufferedReader in, IndexSearcher searcher, Query query, int hitsPerPage,
        boolean raw, boolean interactive) throws IOException {

    try {
        // Collect enough docs to show 5 pages
        TopDocs results = searcher.search(query, 5 * hitsPerPage);
        ScoreDoc[] hits = results.scoreDocs;

        int numTotalHits = results.totalHits;
        System.out.println(numTotalHits + " total matching documents");

        int start = 0;
        int end = Math.min(numTotalHits, hitsPerPage);

        while (true) {
            if (end > hits.length) {
                System.out.println("Only results 1 - " + hits.length + " of " + numTotalHits
                        + " total matching documents collected.");
                System.out.println("Collect more (y/n) ?");
                String line = in.readLine();
                if (line.length() == 0 || line.charAt(0) == 'n') {
                    break;
                }

                hits = searcher.search(query, numTotalHits).scoreDocs;
            }

            end = Math.min(hits.length, start + hitsPerPage);

            for (int i = start; i < end; i++) {
                if (raw) { // output raw format
                    System.out.println("doc=" + hits[i].doc + " score=" + hits[i].score);
                    continue;
                }

                Document doc = searcher.doc(hits[i].doc);
                String path = doc.get("path");
                if (path != null) {
                    System.out.println((i + 1) + ". " + path);
                    String title = doc.get("title");
                    if (title != null) {
                        System.out.println("   Title: " + doc.get("title"));
                    }
                } else {
                    System.out.println((i + 1) + ". " + "No path for this document");
                }

            }

            if (!interactive || end == 0) {
                break;
            }

            if (numTotalHits >= end) {
                boolean quit = false;
                while (true) {
                    System.out.print("Press ");
                    if (start - hitsPerPage >= 0) {
                        System.out.print("(p)revious page, ");
                    }
                    if (start + hitsPerPage < numTotalHits) {
                        System.out.print("(n)ext page, ");
                    }
                    System.out.println("(q)uit or enter number to jump to a page.");

                    String line = in.readLine();
                    if (line.length() == 0 || line.charAt(0) == 'q') {
                        quit = true;
                        break;
                    }
                    if (line.charAt(0) == 'p') {
                        start = Math.max(0, start - hitsPerPage);
                        break;
                    } else if (line.charAt(0) == 'n') {
                        if (start + hitsPerPage < numTotalHits) {
                            start += hitsPerPage;
                        }
                        break;
                    } else {
                        int page = Integer.parseInt(line);
                        if ((page - 1) * hitsPerPage < numTotalHits) {
                            start = (page - 1) * hitsPerPage;
                            break;
                        } else {
                            System.out.println("No such page");
                        }
                    }
                }
                if (quit)
                    break;
                end = Math.min(numTotalHits, start + hitsPerPage);
            }
        }
    } catch (Exception e) {
        // report failures instead of silently swallowing them
        e.printStackTrace();
    }
}

From source file:edu.uci.ics.searcher.SearchFiles.java

License:Apache License

/**
 * This demonstrates a typical paging search scenario, where the search engine presents 
 * pages of size n to the user. The user can then go to the next page if interested in
 * the next hits.
 * 
 * When the query is executed for the first time, then only enough results are collected
 * to fill 5 result pages. If the user wants to page beyond this limit, then the query
 * is executed another time and all hits are collected.
 * 
 */
public static void doPagingSearch(BufferedReader in, IndexSearcher searcher, Query query, int hitsPerPage,
        boolean raw, boolean interactive) throws IOException {

    // Collect enough docs to show 5 pages
    TopDocs results = searcher.search(query, 5 * hitsPerPage);
    ScoreDoc[] hits = results.scoreDocs;

    int numTotalHits = results.totalHits;
    System.out.println(numTotalHits + " total matching documents");

    int start = 0;
    int end = Math.min(numTotalHits, hitsPerPage);

    while (true) {
        if (end > hits.length) {
            System.out.println("Only results 1 - " + hits.length + " of " + numTotalHits
                    + " total matching documents collected.");
            System.out.println("Collect more (y/n) ?");
            String line = in.readLine();
            if (line.length() == 0 || line.charAt(0) == 'n') {
                break;
            }

            hits = searcher.search(query, numTotalHits).scoreDocs;
        }

        end = Math.min(hits.length, start + hitsPerPage);

        for (int i = start; i < end; i++) {
            if (raw) { // output raw format
                System.out.println("doc=" + hits[i].doc + " score=" + hits[i].score);
                continue;
            }

            Document doc = searcher.doc(hits[i].doc);
            String url = doc.get("url");
            if (url != null) {
                System.out.println((i + 1) + ". " + url);
                String title = doc.get("title");
                if (title != null) {
                    System.out.println("   Title: " + doc.get("title"));
                }
            } else {
                System.out.println((i + 1) + ". " + "No path for this document");
            }

        }

        if (!interactive || end == 0) {
            break;
        }

        if (numTotalHits >= end) {
            boolean quit = false;
            while (true) {
                System.out.print("Press ");
                if (start - hitsPerPage >= 0) {
                    System.out.print("(p)revious page, ");
                }
                if (start + hitsPerPage < numTotalHits) {
                    System.out.print("(n)ext page, ");
                }
                System.out.println("(q)uit or enter number to jump to a page.");

                String line = in.readLine();
                if (line.length() == 0 || line.charAt(0) == 'q') {
                    quit = true;
                    break;
                }
                if (line.charAt(0) == 'p') {
                    start = Math.max(0, start - hitsPerPage);
                    break;
                } else if (line.charAt(0) == 'n') {
                    if (start + hitsPerPage < numTotalHits) {
                        start += hitsPerPage;
                    }
                    break;
                } else {
                    int page = Integer.parseInt(line);
                    if ((page - 1) * hitsPerPage < numTotalHits) {
                        start = (page - 1) * hitsPerPage;
                        break;
                    } else {
                        System.out.println("No such page");
                    }
                }
            }
            if (quit)
                break;
            end = Math.min(numTotalHits, start + hitsPerPage);
        }
    }
}

From source file:edu.uci.ics.searcher.SearchFiles.java

License:Apache License

public static String[] getTopSearchResults(String query_string, int num_of_results) throws Exception {
    // Read index
    String index = "index";
    IndexReader idxreader = DirectoryReader.open(FSDirectory.open(new File(index)));

    // Init searcher of the index
    IndexSearcher searcher = new IndexSearcher(idxreader);

    // Set up query
    Query query = myBooleanQuery(query_string);

    // Additional scoring query
    CustomScoreQuery myCustomQuery = new MyOwnScoreQuery(query);

    //TopDocs results = searcher.search(query, num_of_results);
    TopDocs results = searcher.search(myCustomQuery.createWeight(searcher).getQuery(), num_of_results);
    ScoreDoc[] hits = results.scoreDocs;

    String[] top_results = new String[num_of_results];
    for (int i = 0; i < num_of_results && i < hits.length; i++) { // guard against fewer hits than requested
        Document doc = searcher.doc(hits[i].doc);
        String url = doc.get("url");
        //System.out.println(url);
        top_results[i] = url;
    }
    return top_results;
}
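A hypothetical call to getTopSearchResults; the query text and result count are placeholders, a populated "index" directory is assumed, and the caller must handle the declared Exception:

// Hypothetical usage; query text and count are placeholders
String[] urls = SearchFiles.getTopSearchResults("information retrieval", 10);
for (String url : urls) {
    if (url != null) {
        System.out.println(url);
    }
}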

From source file:edu.ucla.cs.scai.canali.core.index.TokenIndex.java

public ArrayList<IndexedToken> getTokenElements(String search, String domainsOfProperty[],
        String rangesOfProperty[], String[] propertyDomains, int maxResults, String... tokenClasses) {

    System.out.println("SEARCH = " + search);
    for (String t : tokenClasses) {
        System.out.println("TC = " + t);
    }

    ArrayList<IndexedToken> res = new ArrayList<>();
    if (search == null) {
        search = "";
    }
    boolean classFound = false;
    boolean classAcceptable = false;
    HashSet<String> admissableLiterals = getAdmissableLiterals(rangesOfProperty);
    try {
        BooleanQuery globalQuery = new BooleanQuery();
        BooleanQuery typeQuery = new BooleanQuery();
        if (tokenClasses != null && tokenClasses.length > 0) {
            for (int i = 0; i < tokenClasses.length; i++) {
                BooleanQuery subTypeQuery = new BooleanQuery();
                subTypeQuery.add(new TermQuery(new Term("type", tokenClasses[i])), BooleanClause.Occur.MUST);
                switch (tokenClasses[i]) {
                case IndexedToken.PROPERTY:
                    if (domainsOfProperty != null && domainsOfProperty.length > 0) {
                        BooleanQuery domainOfQuery = new BooleanQuery();
                        for (String domainOfProperty : domainsOfProperty) {
                            domainOfQuery.add(
                                    new TermQuery(
                                            new Term("domainOfProperty", QueryParser.escape(domainOfProperty))),
                                    BooleanClause.Occur.SHOULD);
                        }
                        subTypeQuery.add(domainOfQuery, BooleanClause.Occur.MUST);
                    }
                    if (rangesOfProperty != null && rangesOfProperty.length > 0) {
                        BooleanQuery rangeOfQuery = new BooleanQuery();
                        for (String rangeOfProperty : rangesOfProperty) {
                            rangeOfQuery.add(
                                    new TermQuery(
                                            new Term("rangeOfProperty", QueryParser.escape(rangeOfProperty))),
                                    BooleanClause.Occur.SHOULD);
                        }
                        subTypeQuery.add(rangeOfQuery, BooleanClause.Occur.MUST);
                    }
                    if (propertyDomains != null && propertyDomains.length > 0) {
                        BooleanQuery domainQuery = new BooleanQuery();
                        for (String propertyDomain : propertyDomains) {
                            domainQuery.add(
                                    new TermQuery(
                                            new Term("propertyDomain", QueryParser.escape(propertyDomain))),
                                    BooleanClause.Occur.SHOULD);
                        }
                        subTypeQuery.add(domainQuery, BooleanClause.Occur.MUST);
                    }
                    break;
                case IndexedToken.ENTITY:
                    if (domainsOfProperty != null && domainsOfProperty.length > 0) {
                        BooleanQuery domainOfQuery = new BooleanQuery();
                        for (String domainOfProperty : domainsOfProperty) {
                            domainOfQuery.add(
                                    new TermQuery(
                                            new Term("domainOfProperty", QueryParser.escape(domainOfProperty))),
                                    BooleanClause.Occur.SHOULD);
                        }
                        subTypeQuery.add(domainOfQuery, BooleanClause.Occur.MUST);
                    }
                    if (rangesOfProperty != null && rangesOfProperty.length > 0) {
                        BooleanQuery rangeOfQuery = new BooleanQuery();
                        for (String rangeOfProperty : rangesOfProperty) {
                            rangeOfQuery.add(
                                    new TermQuery(
                                            new Term("rangeOfProperty", QueryParser.escape(rangeOfProperty))),
                                    BooleanClause.Occur.SHOULD);
                        }
                        subTypeQuery.add(rangeOfQuery, BooleanClause.Occur.MUST);
                    }
                    break;
                case IndexedToken.CLASS:
                    classAcceptable = true;
                    if (domainsOfProperty != null && domainsOfProperty.length > 0) {
                        BooleanQuery domainOfQuery = new BooleanQuery();
                        for (String domainOfProperty : domainsOfProperty) {
                            domainOfQuery.add(
                                    new TermQuery(
                                            new Term("domainOfProperty", QueryParser.escape(domainOfProperty))),
                                    BooleanClause.Occur.SHOULD);
                        }
                        subTypeQuery.add(domainOfQuery, BooleanClause.Occur.MUST);
                    }
                    if (rangesOfProperty != null && rangesOfProperty.length > 0) {
                        BooleanQuery rangeOfQuery = new BooleanQuery();
                        for (String rangeOfProperty : rangesOfProperty) {
                            rangeOfQuery.add(
                                    new TermQuery(
                                            new Term("rangeOfProperty", QueryParser.escape(rangeOfProperty))),
                                    BooleanClause.Occur.SHOULD);
                        }
                        subTypeQuery.add(rangeOfQuery, BooleanClause.Occur.MUST);
                    }
                    break;
                }
                typeQuery.add(subTypeQuery, BooleanClause.Occur.SHOULD);
            }
            if (tokenClasses.length > 1) {
                //typeQuery.setMinimumNumberShouldMatch(1);
            }
            globalQuery.add(typeQuery, BooleanClause.Occur.MUST);
        }

        BooleanQuery searchQuery = new BooleanQuery();
        String[] ss = search.split(" ");
        for (String s : ss) {
            if (!s.equals("")) // Modified to avoid query in lucene with empty label: 
                searchQuery.add(new TermQuery(new Term("label", QueryParser.escape(s))),
                        BooleanClause.Occur.SHOULD);
        }
        //searchQuery.setMinimumNumberShouldMatch(1);
        globalQuery.add(searchQuery, BooleanClause.Occur.MUST);
        QueryParser parser = new QueryParser("", analyzer);
        try (IndexReader reader = DirectoryReader.open(directory)) {
            IndexSearcher searcher = new IndexSearcher(reader);
            String queryString = globalQuery.toString(); //I need this because the parser behaves differently for different search features - see its definition
            System.out.println("QUERY = " + queryString);
            ScoreDoc[] hits = searcher.search(parser.parse(queryString), maxResults * 5).scoreDocs;
            for (ScoreDoc r : hits) {
                Document doc = searcher.doc(r.doc);
                IndexedToken element = elements.get(doc.getField("id").numericValue().intValue());
                if (element instanceof DirectBinaryOperatorToken
                        || element instanceof IndirectBinaryOperatorToken) {
                    String op = ((OperatorToken) element).getSymbol();
                    if (op.startsWith("year") || op.startsWith("month")) {
                        if (admissableLiterals.contains(DATE)) {
                            res.add(element);
                        }
                    } else if (op.equals("=") || !admissableLiterals.isEmpty()) {
                        res.add(element);
                    }
                } else {
                    res.add(element);
                    if (element instanceof ClassToken) {
                        String fullText = search.toLowerCase();
                        boolean isPrefix = true;
                        if (fullText.endsWith(".")) {
                            fullText = fullText.substring(0, fullText.length() - 1);
                            isPrefix = false;
                        } else if (fullText.endsWith("?")) {
                            fullText = fullText.substring(0, fullText.length() - 1);
                            isPrefix = false;
                        } else if (fullText.endsWith(" and having")) {
                            fullText = fullText.substring(0, fullText.length() - 11);
                            isPrefix = false;
                        } else if (fullText.endsWith(" and with")) {
                            fullText = fullText.substring(0, fullText.length() - 9);
                            isPrefix = false;
                        } else if (fullText.endsWith(" having")) {
                            fullText = fullText.substring(0, fullText.length() - 7);
                            isPrefix = false;
                        } else if (fullText.endsWith(" with")) {
                            fullText = fullText.substring(0, fullText.length() - 5);
                            isPrefix = false;
                        }
                        fullText = fullText.trim();
                        classFound = true;
                        ClassToken ct = (ClassToken) element;
                        HashSet<String> searchWords = new HashSet(Arrays.asList(fullText.split(" ")));
                        HashSet<String> classWords = new HashSet(Arrays.asList((ct).getText().split(" "))); //this does not work with plural forms                            
                        searchWords.removeAll(classWords);
                        if (!searchWords.isEmpty()) {
                            AugmentedClassToken act = new AugmentedClassToken(ct, searchWords, isPrefix);
                            res.add(act);
                        }
                    }
                }
                if (res.size() == maxResults) {
                    //break;
                }
            }
        }
    } catch (Exception ex) {
        Logger.getLogger(TokenIndex.class.getName()).log(Level.SEVERE, null, ex);
    }
    if (classAcceptable && !classFound) {
        // System.out.println("Try class + keywords for " + search);
    }
    return res;
}

From source file:edu.ucla.cs.scai.canali.core.index.TokenIndex.java

public HashSet<String>[][] describeProperty(String label, int limit) {
    HashSet<String>[][] res = new HashSet[2][];
    res[0] = new HashSet[2];
    res[1] = new HashSet[3];
    Integer idA = ontologyElementsIdByUri.get(label);
    if (idA == null) {
        return res;
    }
    IndexedToken e = elements.get(idA);
    if (e == null || !(e instanceof PropertyToken)) {
        return res;
    }
    PropertyToken a = (PropertyToken) e;

    BooleanQuery globalQuery = new BooleanQuery();
    BooleanQuery typeQuery = new BooleanQuery();
    BooleanQuery subTypeQuery = new BooleanQuery();
    subTypeQuery.add(new TermQuery(new Term("type", IndexedToken.CLASS)), BooleanClause.Occur.MUST);
    typeQuery.add(subTypeQuery, BooleanClause.Occur.MUST);
    subTypeQuery = new BooleanQuery();
    subTypeQuery.add(new TermQuery(new Term("type", IndexedToken.PROPERTY)), BooleanClause.Occur.MUST);
    typeQuery.add(subTypeQuery, BooleanClause.Occur.MUST);
    globalQuery.add(typeQuery, BooleanClause.Occur.MUST);
    globalQuery.add(new TermQuery(new Term("domainOfProperty", QueryParser.escape(label))),
            BooleanClause.Occur.MUST);

    res[0][0] = new HashSet<>();
    res[0][1] = new HashSet<>();
    QueryParser parser = new QueryParser("", analyzer);
    try (IndexReader reader = DirectoryReader.open(directory)) {
        IndexSearcher searcher = new IndexSearcher(reader);
        String queryString = globalQuery.toString(); //I need this because the parser behaves differently for different search features - see its definition
        ScoreDoc[] hits = searcher.search(parser.parse(queryString), 1000).scoreDocs;
        for (ScoreDoc r : hits) {
            Document doc = searcher.doc(r.doc);
            IndexedToken element = elements.get(doc.getField("id").numericValue().intValue());
            if (element instanceof PropertyToken) {
                res[0][1].add(((PropertyToken) element).uri);
            } else {
                res[0][0].add(((ClassToken) element).uri);
            }
        }
    } catch (Exception ex) {
        Logger.getLogger(TokenIndex.class.getName()).log(Level.SEVERE, null, ex);
    }

    globalQuery = new BooleanQuery();
    typeQuery = new BooleanQuery();
    subTypeQuery = new BooleanQuery();
    subTypeQuery.add(new TermQuery(new Term("type", IndexedToken.CLASS)), BooleanClause.Occur.MUST);
    typeQuery.add(subTypeQuery, BooleanClause.Occur.MUST);
    subTypeQuery = new BooleanQuery();
    subTypeQuery.add(new TermQuery(new Term("type", IndexedToken.PROPERTY)), BooleanClause.Occur.MUST);
    typeQuery.add(subTypeQuery, BooleanClause.Occur.MUST);
    globalQuery.add(typeQuery, BooleanClause.Occur.MUST);
    globalQuery.add(new TermQuery(new Term("rangeOfProperty", QueryParser.escape(label))),
            BooleanClause.Occur.MUST);

    res[1][0] = new HashSet<>();
    res[1][1] = new HashSet<>();
    try (IndexReader reader = DirectoryReader.open(directory)) {
        IndexSearcher searcher = new IndexSearcher(reader);
        String queryString = globalQuery.toString(); //I need this because the parser behaves differently for different search features - see its definition
        ScoreDoc[] hits = searcher.search(parser.parse(queryString), 1000).scoreDocs;
        for (ScoreDoc r : hits) {
            Document doc = searcher.doc(r.doc);
            IndexedToken element = elements.get(doc.getField("id").numericValue().intValue());
            if (element instanceof PropertyToken) {
                res[1][1].add(((PropertyToken) element).uri);
            } else {
                res[1][0].add(((ClassToken) element).uri);
            }
        }
    } catch (Exception ex) {
        Logger.getLogger(TokenIndex.class.getName()).log(Level.SEVERE, null, ex);
    }

    String[] atts = new String[1];
    atts[0] = label;
    res[1][2] = new HashSet<>();
    for (String l : getAdmissableLiterals(atts)) {
        res[1][2].add(l);
    }
    return res;
}

From source file:edu.ucla.cs.scai.linkedspending.index.QueryIndexWithLucene.java

public ArrayList<WeightedDataSet> queryDataset(String query) throws Exception {
    BooleanQuery globalQuery = new BooleanQuery();
    BooleanQuery typeQuery = new BooleanQuery();
    typeQuery.add(new TermQuery(new Term("type", "dataset")), BooleanClause.Occur.MUST);
    globalQuery.add(typeQuery, BooleanClause.Occur.MUST);
    BooleanQuery searchQuery = new BooleanQuery();
    for (String s : keywordExtractor.normalizeWords(query)) {
        searchQuery.add(new TermQuery(new Term("label", QueryParser.escape(s))), BooleanClause.Occur.SHOULD);
    }
    globalQuery.add(searchQuery, BooleanClause.Occur.MUST);
    QueryParser parser = new QueryParser("", analyzer);
    ArrayList<WeightedDataSet> res = new ArrayList<>();
    try (IndexReader reader = DirectoryReader.open(directory)) {
        IndexSearcher searcher = new IndexSearcher(reader);
        String queryString = globalQuery.toString(); //I need this because the parser behaves differently for different search features - see its definition
        ScoreDoc[] hits = searcher.search(parser.parse(queryString), 50).scoreDocs;
        for (ScoreDoc r : hits) {
            Document doc = searcher.doc(r.doc);
            res.add(new WeightedDataSet(doc.getField("uri").stringValue(), r.score));
        }
        return res;
    } catch (Exception ex) {
        Logger.getLogger(QueryIndexWithLucene.class.getName()).log(Level.SEVERE, null, ex);
    }
    return res;
}