Example usage for org.apache.solr.client.solrj SolrQuery getQuery

List of usage examples for org.apache.solr.client.solrj SolrQuery getQuery

Introduction

On this page you can find example usages of org.apache.solr.client.solrj SolrQuery.getQuery().

Prototype

public String getQuery() 
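
Before the usage examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of what getQuery() returns before and after setQuery(String) is called. The class name GetQueryDemo and the sample query strings are illustrative only.

import org.apache.solr.client.solrj.SolrQuery;

public class GetQueryDemo {
    public static void main(String[] args) {
        SolrQuery query = new SolrQuery();
        // getQuery() returns null until a main query ("q" parameter) has been set.
        System.out.println(query.getQuery());

        query.setQuery("title:solr");
        // Now getQuery() returns the string last passed to setQuery().
        System.out.println(query.getQuery());   // title:solr

        // A pattern that recurs in the usages below: fall back to the
        // match-all query when the user-supplied string is empty.
        String userInput = "   ";
        String q = userInput.trim();
        query.setQuery(q.isEmpty() ? "*:*" : q);
        System.out.println(query.getQuery());   // *:*
    }
}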

Usage

From source file:org.ambraproject.search.service.SolrSearchService.java

License:Apache License

/**
 * Populate facets of the search object.
 *
 * If no search results (and hence no facets) are found, remove the defined filters and try
 * the search again.  Journals and ArticleType facets will always be the complete list.
 *
 * @param searchParameters The search parameters
 * @return a populated SearchResultSinglePage object
 * @throws ApplicationException
 */
public SearchResultSinglePage getFilterData(SearchParameters searchParameters) throws ApplicationException {
    //TODO: This function queries SOLR for the journal and article type list
    //We should migrate this away from config and into a database when it is
    //available

    //Does not impact unformattedQuery field.
    SearchParameters sp = cleanStrings(searchParameters);

    String q = searchParameters.getUnformattedQuery().trim();

    //In this use case, if the query string is empty, we want to get facets for everything
    if (q.length() == 0) {
        q = "*:*";
    }

    if (log.isDebugEnabled()) {
        log.debug("Solr Search performed to get facet data on the unformattedSearch String: " + q);
    }

    //We want a complete set of facet data.  So first, let's get it all
    SolrQuery query = createQuery("*:*", 0, 0, false);

    //Remove facets we don't use in this case
    query.removeFacetField("author_facet");
    query.removeFacetField("editor_facet");
    query.removeFacetField("affiliate_facet");
    //Add the one we do want in this case.
    query.addFacetField("cross_published_journal_key");
    query.setFacetLimit(MAX_FACET_SIZE);

    //Related to JO: http://joborder.plos.org/view.php?id=17480
    //(for now) we don't want to search on Issue Images
    query.addFilterQuery(createFilterNoIssueImageDocuments());

    SearchResultSinglePage preFilterResults = search(query);

    setFilters(query, sp, true);

    query.setQuery(q);

    SearchResultSinglePage results = null;
    try {
        results = search(query);
    } catch (SolrException e) {
        query.setQuery("*:*");
        if (log.isWarnEnabled()) {
            log.warn("Solr Search failed on the unformattedSearch String: { " + query.getQuery()
                    + " } so the query will be re-run using the String *:* to populate the Filters"
                    + " on the Advanced Search page.", e);
        }
    }

    if (results == null || results.getTotalNoOfResults() == 0) {
        //If no results, remove optional filters and try again
        for (String filter : query.getFilterQueries()) {
            if (filter.indexOf(createFilterFullDocuments()) < 0) {
                query.removeFilterQuery(filter);
            }
        }

        results = search(query);

        //If results are STILL empty, we must return something for subjects,
        //so let's use the global list
        if (results.getTotalNoOfResults() == 0) {
            results.setSubjectFacet(preFilterResults.getSubjectFacet());
        }

        results.setFiltersReset(true);
    }

    //Let's always return ALL values for journals and article types.
    //These lists will not be dependent on the user's other
    //selections, other than the query.
    //However, subjects will be!
    results.setJournalFacet(preFilterResults.getJournalFacet());
    results.setArticleTypeFacet(preFilterResults.getArticleTypeFacet());

    return results;
}

From source file:org.ambraproject.search.service.SolrSearchService.java

License:Apache License

private SearchResultSinglePage readQueryResults(QueryResponse queryResponse, SolrQuery query) {
    SolrDocumentList documentList = queryResponse.getResults();

    if (log.isInfoEnabled()) {
        StringBuilder filterQueriesForLog = new StringBuilder();
        if (query.getFilterQueries() != null && query.getFilterQueries().length > 0) {
            for (String filterQuery : query.getFilterQueries()) {
                filterQueriesForLog.append(filterQuery).append(" , ");
            }
            if (filterQueriesForLog.length() > 3) {
                filterQueriesForLog.replace(filterQueriesForLog.length() - 3, filterQueriesForLog.length(), "");
            } else {
                filterQueriesForLog.append("No Filter Queries");
            }
        }

        log.info("query.getQuery():{ " + query.getQuery() + " }" + ", query.getSortFields():{ "
                + (query.getSortFields() == null ? null : Arrays.asList(query.getSortFields())) + " }"
                + ", query.getFilterQueries():{ " + filterQueriesForLog.toString() + " }" + ", found:"
                + documentList.getNumFound() + ", start:" + documentList.getStart() + ", max_score:"
                + documentList.getMaxScore() + ", QTime:" + queryResponse.getQTime() + "ms");

        // TODO: implement spell-checking in a meaningful manner.  This loop exists only to generate log output.
        // TODO: Add "spellcheckAlternatives" or something like it to the SearchHits class so it can be displayed to the user like Google's "did you mean..."
        // TODO: Turn off spellchecking for the "author" field.
        if (queryResponse.getSpellCheckResponse() != null
                && queryResponse.getSpellCheckResponse().getSuggestionMap() != null
                && queryResponse.getSpellCheckResponse().getSuggestionMap().keySet().size() > 0) {
            StringBuilder sb = new StringBuilder("Spellcheck alternative suggestions:");
            for (String token : queryResponse.getSpellCheckResponse().getSuggestionMap().keySet()) {
                sb.append(" { ").append(token).append(" : ");
                if (queryResponse.getSpellCheckResponse().getSuggestionMap().get(token).getAlternatives()
                        .size() < 1) {
                    sb.append("NO ALTERNATIVES");
                } else {
                    for (String alternative : queryResponse.getSpellCheckResponse().getSuggestionMap()
                            .get(token).getAlternatives()) {
                        sb.append(alternative).append(", ");
                    }
                    sb.replace(sb.length() - 2, sb.length(), ""); // Remove last comma and space.
                }
                sb.append(" } ,");
            }
            log.info(sb.replace(sb.length() - 2, sb.length(), "").toString()); // Remove last comma and space.
        } else {
            log.info("Solr thinks everything in the query is spelled correctly.");
        }
    }

    Map<String, Map<String, List<String>>> highlightings = queryResponse.getHighlighting();

    List<SearchHit> searchResults = new ArrayList<SearchHit>();
    for (SolrDocument document : documentList) {

        String id = getFieldValue(document, "id", String.class, query.toString());
        String message = id == null ? query.toString() : id;
        Float score = getFieldValue(document, "score", Float.class, message);
        String title = getFieldValue(document, "title_display", String.class, message);
        Date publicationDate = getFieldValue(document, "publication_date", Date.class, message);
        String eissn = getFieldValue(document, "eissn", String.class, message);
        String journal = getFieldValue(document, "journal", String.class, message);
        String articleType = getFieldValue(document, "article_type", String.class, message);

        List<String> authorList = getFieldMultiValue(document, message, String.class, "author_display");

        String highlights = null;
        if (query.getHighlight()) {
            highlights = getHighlights(highlightings.get(id));
        }

        SearchHit hit = new SearchHit(score, id, title, highlights, authorList, publicationDate, eissn, journal,
                articleType);

        if (log.isDebugEnabled())
            log.debug(hit.toString());

        searchResults.add(hit);
    }

    //Here we assume that the number of hits is always going to be within the range of an int
    SearchResultSinglePage results = new SearchResultSinglePage((int) documentList.getNumFound(), -1,
            searchResults, query.getQuery());

    if (queryResponse.getFacetField("subject_facet") != null) {
        results.setSubjectFacet(facetCountsToHashMap(queryResponse.getFacetField("subject_facet")));
    }

    if (queryResponse.getFacetField("author_facet") != null) {
        results.setAuthorFacet(facetCountsToHashMap(queryResponse.getFacetField("author_facet")));
    }

    if (queryResponse.getFacetField("editor_facet") != null) {
        results.setEditorFacet(facetCountsToHashMap(queryResponse.getFacetField("editor_facet")));
    }

    if (queryResponse.getFacetField("article_type_facet") != null) {
        results.setArticleTypeFacet(facetCountsToHashMap(queryResponse.getFacetField("article_type_facet")));
    }

    if (queryResponse.getFacetField("affiliate_facet") != null) {
        results.setInstitutionFacet(facetCountsToHashMap(queryResponse.getFacetField("affiliate_facet")));
    }

    if (queryResponse.getFacetField("cross_published_journal_key") != null) {
        results.setJournalFacet(
                facetCountsToHashMap(queryResponse.getFacetField("cross_published_journal_key")));
    }

    return results;
}

From source file:org.ambraproject.service.search.SolrSearchService.java

License:Apache License

/**
 * Execute a Solr search composed from the contents of the <code>SearchParameters.unformattedQuery</code> property.
 * The query is filtered by the journal and category fields also contained in the <code>searchParameters</code>
 * parameter.  No filter is created for date ranges, since that is assumed to be contained in
 * <code>SearchParameters.unformattedQuery</code>.
 *
 * @param searchParameters Contains all the parameters necessary to execute a search against the Solr query engine
 * @return A subset (determined by <code>SearchParameters.startPage</code> and <code>SearchParameters.pageSize</code>
 *         of the results of the Solr query generated from the contents of the <code>searchParameters</code>
 *         parameter)
 * @throws ApplicationException Thrown during failed interactions with the Solr Server
 */
public SearchResultSinglePage advancedSearch(SearchParameters searchParameters) throws ApplicationException {
    SearchParameters sp = cleanStrings(searchParameters); // Does not impact unformattedQuery field.
    if (log.isDebugEnabled()) {
        log.debug("Solr Search performed on the unformattedSearch String: "
                + searchParameters.getUnformattedQuery().trim());
    }

    SolrQuery query = createQuery(null, sp.getStartPage(), sp.getPageSize(), false);
    query.setQuery(searchParameters.getUnformattedQuery().trim());

    SolrQuery journalFacetsQuery = createFacetsQuery(query.getQuery(), "cross_published_journal_key", false);
    SolrQuery articleTypeFacetsQuery = createFacetsQuery(query.getQuery(), "article_type_facet", false);

    setFilters(query, sp, false, false);

    //The journals query doesn't get the journal filter and the articles query doesn't get the articles filter
    //Notice: there is some code duplication here. note above
    setFilters(journalFacetsQuery, sp, true, false);
    setFilters(articleTypeFacetsQuery, sp, false, true);

    setSort(query, sp);

    QueryResponse journalFacetsResponse = getSOLRResponse(journalFacetsQuery);
    QueryResponse articleTypeFacetsResponse = getSOLRResponse(articleTypeFacetsQuery);

    //Notice: there is some code duplication here. note above
    FacetField journals = journalFacetsResponse.getFacetField("cross_published_journal_key");
    FacetField articleTypes = articleTypeFacetsResponse.getFacetField("article_type_facet");

    SearchResultSinglePage results = search(query.setQuery(searchParameters.getUnformattedQuery().trim()));

    results.setJournalFacet(facetCountsToHashMap(journals));
    results.setArticleTypeFacet(facetCountsToHashMap(articleTypes));

    return results;
}

From source file:org.ambraproject.service.search.SolrSearchService.java

License:Apache License

/**
 * Populate facets of the search object.
 * <p/>
 * If no search results (and hence no facets) are found, remove the defined filters and try the search again.  Journals will
 * always be the complete list.
 *
 * @param searchParameters The search parameters
 * @return a populated SearchResultSinglePage object
 * @throws ApplicationException
 */
public SearchResultSinglePage getFilterData(SearchParameters searchParameters) throws ApplicationException {
    //TODO: This function queries SOLR for the journal and article type list
    //We should migrate this away from config and into a database when it is
    //available

    //Does not impact unformattedQuery field.
    SearchParameters sp = cleanStrings(searchParameters);

    String q = searchParameters.getUnformattedQuery().trim();

    //In this use case, if the query string is empty, we want to get facets for everything
    if (q.length() == 0) {
        q = "*:*";
    }

    if (log.isDebugEnabled()) {
        log.debug("Solr Search performed to get facet data on the unformattedSearch String: " + q);
    }

    //We want a complete set of facet data.  So first, let's get it all
    SolrQuery query = createQuery("*:*", 0, 0, false);

    //Remove facets we don't use in this case
    query.removeFacetField("author_facet");
    query.removeFacetField("editor_facet");
    query.removeFacetField("affiliate_facet");
    //Add the one we do want in this case.
    query.addFacetField("cross_published_journal_key");
    query.addFacetField("article_type");
    query.setFacetLimit(MAX_FACET_SIZE);

    //Related to JO: http://joborder.plos.org/view.php?id=17480
    //(for now) we don't want to search on Issue Images
    query.addFilterQuery(createFilterNoIssueImageDocuments());

    SearchResultSinglePage preFilterResults = search(query);

    setFilters(query, sp, false, false);

    query.setQuery(q);

    SearchResultSinglePage results = null;
    try {
        results = search(query);
    } catch (SolrException e) {
        query.setQuery("*:*");
        if (log.isWarnEnabled()) {
            log.warn("Solr Search failed on the unformattedSearch String: { " + query.getQuery()
                    + " } so the query will be re-run using the String *:* to populate the Filters"
                    + " on the Advanced Search page.", e);
        }
    }

    if (results == null || results.getTotalNoOfResults() == 0) {
        //If no results, remove optional filters and try again
        for (String filter : query.getFilterQueries()) {
            if (filter.indexOf(createFilterFullDocuments()) < 0) {
                query.removeFilterQuery(filter);
            }
        }

        results = search(query);

        //If results are STILL empty, we must return something for subjects and article type,
        //so let's use the global list
        if (results.getTotalNoOfResults() == 0) {
            results.setSubjectFacet(preFilterResults.getSubjectFacet());
            results.setArticleTypeFacet(preFilterResults.getArticleTypeFacet());
        }

        results.setFiltersReset(true);
    }

    //Let's always return ALL values for journals.
    //These lists will not be dependent on the user's other
    //selections, other than the query.
    //However, subjects and article type will be!
    results.setJournalFacet(preFilterResults.getJournalFacet());
    results.setArticleTypeFacet(preFilterResults.getArticleTypeFacet());

    return results;
}

From source file:org.ambraproject.service.search.SolrSearchService.java

License:Apache License

@SuppressWarnings("unchecked")
private SearchResultSinglePage readQueryResults(QueryResponse queryResponse, SolrQuery query) {
    SolrDocumentList documentList = queryResponse.getResults();

    if (log.isInfoEnabled()) {
        StringBuilder filterQueriesForLog = new StringBuilder();
        if (query.getFilterQueries() != null && query.getFilterQueries().length > 0) {
            for (String filterQuery : query.getFilterQueries()) {
                filterQueriesForLog.append(filterQuery).append(" , ");
            }
            if (filterQueriesForLog.length() > 3) {
                filterQueriesForLog.replace(filterQueriesForLog.length() - 3, filterQueriesForLog.length(), "");
            } else {
                filterQueriesForLog.append("No Filter Queries");
            }
        }

        log.info("query.getQuery():{ " + query.getQuery() + " }" + ", query.getSortFields():{ "
                + (query.getSortFields() == null ? null : Arrays.asList(query.getSortFields())) + " }"
                + ", query.getFilterQueries():{ " + filterQueriesForLog.toString() + " }" + ", found:"
                + documentList.getNumFound() + ", start:" + documentList.getStart() + ", max_score:"
                + documentList.getMaxScore() + ", QTime:" + queryResponse.getQTime() + "ms");

        // TODO: implement spell-checking in a meaningful manner.  This loop exists only to generate log output.
        // TODO: Add "spellcheckAlternatives" or something like it to the SearchHits class so it can be displayed to the user like Google's "did you mean..."
        // TODO: Turn off spellchecking for the "author" field.
        if (queryResponse.getSpellCheckResponse() != null
                && queryResponse.getSpellCheckResponse().getSuggestionMap() != null
                && queryResponse.getSpellCheckResponse().getSuggestionMap().keySet().size() > 0) {
            StringBuilder sb = new StringBuilder("Spellcheck alternative suggestions:");
            for (String token : queryResponse.getSpellCheckResponse().getSuggestionMap().keySet()) {
                sb.append(" { ").append(token).append(" : ");
                if (queryResponse.getSpellCheckResponse().getSuggestionMap().get(token).getAlternatives()
                        .size() < 1) {
                    sb.append("NO ALTERNATIVES");
                } else {
                    for (String alternative : queryResponse.getSpellCheckResponse().getSuggestionMap()
                            .get(token).getAlternatives()) {
                        sb.append(alternative).append(", ");
                    }
                    sb.replace(sb.length() - 2, sb.length(), ""); // Remove last comma and space.
                }
                sb.append(" } ,");
            }
            log.info(sb.replace(sb.length() - 2, sb.length(), "").toString()); // Remove last comma and space.
        } else {
            log.info("Solr thinks everything in the query is spelled correctly.");
        }
    }

    List<SearchHit> searchResults = new ArrayList<SearchHit>();
    for (SolrDocument document : documentList) {

        String id = SolrServiceUtil.getFieldValue(document, "id", String.class, query.toString());
        String message = id == null ? query.toString() : id;
        Float score = SolrServiceUtil.getFieldValue(document, "score", Float.class, message);
        String title = SolrServiceUtil.getFieldValue(document, "title_display", String.class, message);
        Date publicationDate = SolrServiceUtil.getFieldValue(document, "publication_date", Date.class, message);
        String eissn = SolrServiceUtil.getFieldValue(document, "eissn", String.class, message);
        String journal = SolrServiceUtil.getFieldValue(document, "journal", String.class, message);
        String articleType = SolrServiceUtil.getFieldValue(document, "article_type", String.class, message);
        String strikingImage = SolrServiceUtil.getFieldValue(document, "striking_image", String.class, message);
        List<String> abstractText = SolrServiceUtil.getFieldMultiValue(document, "abstract", String.class,
                message);
        List<String> abstractPrimary = SolrServiceUtil.getFieldMultiValue(document, "abstract_primary_display",
                String.class, message);
        List<String> authorList = SolrServiceUtil.getFieldMultiValue(document, "author_display", String.class,
                message);
        // TODO create a dedicated field for checking the existence of assets for a given article.
        List<String> figureTableCaptions = SolrServiceUtil.getFieldMultiValue(document, "figure_table_caption",
                String.class, message);
        List<String> subjects = SolrServiceUtil.getFieldMultiValue(document, "subject", String.class, message);
        List<String> expressionOfconcern = SolrServiceUtil.getFieldMultiValue(document, "expression_of_concern",
                String.class, message);
        String retraction = SolrServiceUtil.getFieldValue(document, "retraction", String.class, message);
        String abstractResult = "";

        //Use the primary abstract if it exists
        if (abstractPrimary.size() > 0) {
            abstractResult = StringUtils.join(abstractPrimary, ", ");
        } else {
            if (abstractText.size() > 0) {
                abstractResult = StringUtils.join(abstractText, ", ");
            }
        }

        //Flatten the list of subjects to a unique set
        Set<String> flattenedSubjects = new HashSet<String>();
        for (String subject : subjects) {
            for (String temp : subject.split("/")) {
                if (temp.trim().length() > 0) {
                    flattenedSubjects.add(temp);
                }
            }
        }

        SearchHit hit = SearchHit.builder().setHitScore(score).setUri(id).setTitle(title)
                .setListOfCreators(authorList).setDate(publicationDate).setIssn(eissn).setJournalTitle(journal)
                .setArticleTypeForDisplay(articleType).setAbstractText(abstractResult)
                .setStrikingImage(strikingImage).setHasAssets(figureTableCaptions.size() > 0)
                .setSubjects(flattenedSubjects).setSubjectsPolyhierarchy(subjects)
                .setExpressionOfConcern(expressionOfconcern).setRetraction(retraction).build();

        if (log.isDebugEnabled())
            log.debug(hit.toString());

        searchResults.add(hit);
    }

    //Here we assume that the number of hits is always going to be within the range of an int
    SearchResultSinglePage results = new SearchResultSinglePage((int) documentList.getNumFound(), -1,
            searchResults, query.getQuery());

    if (queryResponse.getFacetField("subject_facet") != null) {
        List<Map> subjects = facetCountsToHashMap(queryResponse.getFacetField("subject_facet"));

        if (subjects != null) {
            List<Map> subjectResult = new ArrayList<Map>();
            SortedMap<String, Long> topSubjects = null;

            try {
                topSubjects = getTopSubjects();
            } catch (ApplicationException ex) {
                throw new RuntimeException(ex.getMessage(), ex);
            }

            //Remove top level 1 subjects from list, FEND-805
            for (Map<String, Object> m : subjects) {
                if (!topSubjects.containsKey(m.get("name"))) {
                    HashMap<String, Object> hm = new HashMap<String, Object>();
                    hm.put("name", m.get("name"));
                    hm.put("count", m.get("count"));
                    subjectResult.add(hm);
                }
            }

            results.setSubjectFacet(subjectResult);
        } else {
            results.setSubjectFacet(null);
        }
    }

    if (queryResponse.getFacetField("author_facet") != null) {
        results.setAuthorFacet(facetCountsToHashMap(queryResponse.getFacetField("author_facet")));
    }

    if (queryResponse.getFacetField("editor_facet") != null) {
        results.setEditorFacet(facetCountsToHashMap(queryResponse.getFacetField("editor_facet")));
    }

    if (queryResponse.getFacetField("article_type_facet") != null) {
        results.setArticleTypeFacet(facetCountsToHashMap(queryResponse.getFacetField("article_type_facet")));
    }

    if (queryResponse.getFacetField("affiliate_facet") != null) {
        results.setInstitutionFacet(facetCountsToHashMap(queryResponse.getFacetField("affiliate_facet")));
    }

    if (queryResponse.getFacetField("cross_published_journal_key") != null) {
        results.setJournalFacet(
                facetCountsToHashMap(queryResponse.getFacetField("cross_published_journal_key")));
    }

    return results;
}

From source file:org.apache.jackrabbit.oak.plugins.index.solr.query.FilterQueryParserTest.java

License:Apache License

@Test
public void testMatchAllConversionWithNoConstraints() throws Exception {
    Filter filter = mock(Filter.class);
    OakSolrConfiguration configuration = mock(OakSolrConfiguration.class);
    QueryIndex.IndexPlan plan = mock(QueryIndex.IndexPlan.class);
    SolrQuery solrQuery = FilterQueryParser.getQuery(filter, plan, configuration);
    assertNotNull(solrQuery);
    assertEquals("*:*", solrQuery.getQuery());
}

From source file:org.apache.metron.solr.matcher.SolrQueryMatcher.java

License:Apache License

@Override
public boolean matches(Object o) {
    SolrQuery solrQuery = (SolrQuery) o;
    return Objects.equals(solrQuery.getStart(), expectedSolrQuery.getStart())
            && Objects.equals(solrQuery.getRows(), expectedSolrQuery.getRows())
            && Objects.equals(solrQuery.getQuery(), expectedSolrQuery.getQuery())
            && Objects.equals(solrQuery.getSorts(), expectedSolrQuery.getSorts())
            && Objects.equals(solrQuery.getFields(), expectedSolrQuery.getFields())
            && Arrays.equals(solrQuery.getFacetFields(), expectedSolrQuery.getFacetFields())
            && Objects.equals(solrQuery.get("collection"), expectedSolrQuery.get("collection"))
            && Objects.equals(solrQuery.get("stats"), expectedSolrQuery.get("stats"))
            && Objects.equals(solrQuery.get("stats.field"), expectedSolrQuery.get("stats.field"))
            && Objects.equals(solrQuery.get("facet"), expectedSolrQuery.get("facet"))
            && Objects.equals(solrQuery.get("facet.pivot"), expectedSolrQuery.get("facet.pivot"));
}

From source file:org.codice.solr.query.SolrQueryFilterVisitorTest.java

License:Open Source License

@Test
@Ignore
public void test() throws Exception {
    LOGGER.info("Running test ...");

    // setup
    String workingDir = System.getProperty("user.dir") + "/src/test/resources/";
    String solrConfDir = workingDir + "solr/conf/";
    File solrConfigFile = new File(solrConfDir + "solrconfig.xml"); //getConfigFile(solrConfigFileName, configProxy);
    assertTrue(solrConfigFile.exists());
    File solrSchemaFile = new File(solrConfDir + "schema.xml"); //getConfigFile(schemaFileName, configProxy);
    assertTrue(solrSchemaFile.exists());
    File solrFile = new File(solrConfDir + "solr.xml"); //getConfigFile(DEFAULT_SOLR_XML, configProxy);
    assertTrue(solrFile.exists());

    File solrConfigHome = new File(solrConfigFile.getParent());
    assertTrue(solrConfigHome.exists());

    SolrConfig solrConfig = null;
    IndexSchema indexSchema = null;
    SolrResourceLoader resourceLoader = null;
    SolrCoreContainer container = null;

    try {
        // NamedSPILoader uses the thread context classloader to lookup
        // codecs, posting formats, and analyzers
        solrConfig = new SolrConfig(solrConfigHome.getParent(), "solrConfig.xml",
                new InputSource(FileUtils.openInputStream(solrConfigFile)));
        assertNotNull(solrConfig);
        indexSchema = new IndexSchema(solrConfig, "schema.xml",
                new InputSource(FileUtils.openInputStream(solrSchemaFile)));
        assertNotNull(indexSchema);
        resourceLoader = new SolrResourceLoader(solrConfigHome.getAbsolutePath());
        assertNotNull(resourceLoader);
        container = new SolrCoreContainer(resourceLoader, solrFile);
        assertNotNull(container);
        CoreDescriptor coreDescriptor = new CoreDescriptor(container, CORE_NAME,
                solrConfig.getResourceLoader().getInstanceDir());
        assertNotNull(coreDescriptor);

        File dataDir = new File(workingDir + "data"); //configProxy.getDataDirectory();
        LOGGER.debug("Using data directory [{}]", dataDir);

        SolrCore core = new SolrCore(CORE_NAME, dataDir.getAbsolutePath(), solrConfig, indexSchema,
                coreDescriptor);
        container.register(CORE_NAME, core, false);
        assertNotNull(core);

        EmbeddedSolrServer solrServer = new EmbeddedSolrServer(container, CORE_NAME);

        // the test
        SolrQueryFilterVisitor visitor = new SolrQueryFilterVisitor(solrServer, CORE_NAME);
        Filter filter = ECQL.toFilter("Name = 'Hugh'");
        SolrQuery solrQuery = (SolrQuery) filter.accept(visitor, null);
        assertNotNull(solrQuery);

        // Solr does not support outside parenthesis in certain queries and throws EOF exception.
        String queryPhrase = solrQuery.getQuery().trim();
        if (queryPhrase.matches("\\(\\s*\\{!.*\\)")) {
            solrQuery.setQuery(queryPhrase.replaceAll("^\\(\\s*|\\s*\\)$", ""));
        }
        LOGGER.info("solrQuery = {}", solrQuery);

        QueryResponse solrResponse = solrServer.query(solrQuery, METHOD.POST);
        assertNotNull(solrResponse);
        long numResults = solrResponse.getResults().getNumFound();
        LOGGER.info("numResults = {}", numResults);
    } catch (ParserConfigurationException e) {
        LOGGER.warn("Parser configuration exception loading index schema", e);
    } catch (IOException e) {
        LOGGER.warn("IO exception loading index schema", e);
    } catch (SAXException e) {
        LOGGER.warn("SAX exception loading index schema", e);
    }
}

From source file:org.craftercms.search.service.impl.SolrSearchService.java

License:Open Source License

protected void addAdditionalFilterQueries(String indexId, SolrQuery solrQuery) {
    if (solrQuery.isDisableAdditionalFilters()) {
        if (logger.isDebugEnabled()) {
            logger.debug("{}Additional filters disabled for query {}", getIndexPrefix(indexId), solrQuery);
        }

        return;
    }

    String query = solrQuery.getQuery();
    String[] filterQueries = solrQuery.getFilterQueries();

    for (String additionalFilterQuery : additionalFilterQueries) {
        boolean add = true;

        if (StringUtils.isNotEmpty(query)) {
            if (query.contains(additionalFilterQuery)) {
                add = false;
            }
        }

        if (ArrayUtils.isNotEmpty(filterQueries)) {
            for (String filterQuery : filterQueries) {
                if (filterQuery.contains(additionalFilterQuery)) {
                    add = false;
                    break;
                }
            }
        }

        if (add) {
            solrQuery.addFilterQuery(additionalFilterQuery);
        }
    }
}

From source file:org.eclipse.rdf4j.sail.solr.SolrIndex.java

License:Open Source License

/**
 * Evaluates the given query only for the given resource.
 *
 * @throws SolrServerException
 */
public QueryResponse search(Resource resource, SolrQuery query) throws SolrServerException, IOException {
    // rewrite the query
    String idQuery = termQuery(SearchFields.URI_FIELD_NAME, SearchFields.getResourceID(resource));
    query.setQuery(query.getQuery() + " AND " + idQuery);
    return search(query);
}