Example usage for org.apache.commons.lang StringUtils containsIgnoreCase

List of usage examples for org.apache.commons.lang StringUtils containsIgnoreCase

Introduction

In this page you can find the example usage for org.apache.commons.lang StringUtils containsIgnoreCase.

Prototype

public static boolean containsIgnoreCase(String str, String searchStr) 

Source Link

Document

Checks if String contains a search String irrespective of case, handling null.

Usage

From source file:org.fao.geonet.kernel.harvest.harvester.csw.Harvester.java

/**
 * Adds a filter for the given queryable, selecting the CSW operator from the value:
 * a value containing the "%" wildcard, or the AnyText queryable, uses a
 * "PropertyIsLike" filter; everything else uses exact matching with
 * "PropertyIsEqualTo".
 */
private void buildFilterQueryable(List<Element> queryables, String name, String value) {
    final boolean useLikeFilter = value.contains("%") || StringUtils.containsIgnoreCase(name, "AnyText");
    buildFilterQueryable(queryables, name, value, useLikeFilter ? "PropertyIsLike" : "PropertyIsEqualTo");
}

From source file:org.fao.geonet.kernel.search.LuceneSearcher.java

/**
 * Performs a query, then loops over the results to find stored values of a
 * specific field containing the search value (case is ignored).
 *
 * If the field is not stored in the index, an empty collection is returned.
 *
 * @param srvContext   the service context; may be null (only used to fetch the handler context)
 * @param searchField   the field to search in
 * @param searchValue   the value contained in the field's value (case is ignored); may be ""
 * @param config   the service configuration used when running the search
 * @param maxNumberOfTerms   the maximum number of matching values to collect
 * @param threshold   the minimum frequency for values to be returned
 * @return the distinct matching field values with their frequency
 * @throws Exception if the search or the index access fails
 */
public Collection<SearchManager.TermFrequency> getSuggestionForFields(ServiceContext srvContext,
        final String searchField, final String searchValue, ServiceConfig config, int maxNumberOfTerms,
        int threshold) throws Exception {
    if (Log.isDebugEnabled(Geonet.SEARCH_ENGINE)) {
        Log.debug(Geonet.SEARCH_ENGINE,
                "Get suggestion on field: '" + searchField + "'" + "\tsearching: '" + searchValue + "'"
                        + "\tthreshold: '" + threshold + "'" + "\tmaxNumberOfTerms: '" + maxNumberOfTerms
                        + "'");
    }

    // Counts the values added; the scan stops once maxNumberOfTerms is reached.
    int counter = 0;
    // Strip Lucene wildcards ("*", "?"): matching below is a plain substring test.
    String searchValueWithoutWildcard = searchValue.replaceAll("[*?]", "");
    // Analyzed form of the search value, so matches agree with how the field was indexed.
    String analyzedSearchValue = analyzeText(searchField, searchValueWithoutWildcard,
            SearchManager.getAnalyzer(_language, true));
    Map<String, SearchManager.TermFrequency> finalValuesMap = new HashMap<String, SearchManager.TermFrequency>();

    GeonetContext gc = null;
    if (srvContext != null) {
        gc = (GeonetContext) srvContext.getHandlerContext(Geonet.CONTEXT_NAME);
    }

    // Search restricted to what the current session is allowed to see.
    // A like query limits the size of the results.
    Element elData = new Element(Jeeves.Elem.REQUEST); // SearchDefaults.getDefaultSearch(srvContext, null);
    elData.addContent(new Element("fast").addContent("index"))
            .addContent(new Element(Geonet.SearchResult.BUILD_SUMMARY).addContent(Boolean.toString(false)));

    if (!searchValue.equals("")) {
        elData.addContent(new Element(searchField).setText(searchValue));
    }
    search(srvContext, elData, config);

    elData.addContent(new Element("from").setText("1"));
    elData.addContent(new Element("to").setText(getSize() + ""));

    if (getTo() > 0) {
        TopDocs tdocs = performQuery(0, getSize(), false);

        for (int i = 0; i < tdocs.scoreDocs.length; i++) {
            if (counter >= maxNumberOfTerms) {
                break;
            }
            Document doc;

            // Only load the one field we need from the stored document.
            DocumentStoredFieldVisitor docVisitor = new DocumentStoredFieldVisitor(
                    Collections.singleton(searchField));
            IndexAndTaxonomy indexAndTaxonomy = _sm.getIndexReader(_language, _versionToken);
            _versionToken = indexAndTaxonomy.version;
            try {
                indexAndTaxonomy.indexReader.document(tdocs.scoreDocs[i].doc, docVisitor);
                doc = docVisitor.getDocument();

                String[] values = doc.getValues(searchField);

                // Keep values containing either the analyzed or the raw (wildcard-stripped)
                // search value; an empty search value matches everything.
                for (int j = 0; j < values.length; ++j) {
                    if (searchValue.equals("") || StringUtils.containsIgnoreCase(values[j], analyzedSearchValue)
                            || StringUtils.containsIgnoreCase(values[j], searchValueWithoutWildcard)) {
                        // Use a map to accumulate each value's frequency.
                        String termName = values[j];
                        TermFrequency valueFrequency = finalValuesMap.get(termName);
                        if (valueFrequency != null) {
                            valueFrequency.setFrequency(valueFrequency.getFrequency() + 1);
                        } else {
                            finalValuesMap.put(termName, new TermFrequency(termName, 1));
                        }

                        counter++;
                    }
                }
            } finally {
                // Always release the reader, even if document loading throws.
                _sm.releaseIndexReader(indexAndTaxonomy);
            }
        }
    }

    // Filter out values which do not reach the threshold.
    if (threshold > 1) {
        int size = finalValuesMap.size();
        Iterator<?> it = finalValuesMap.entrySet().iterator();
        while (it.hasNext()) {
            Entry<?, ?> item = (Entry<?, ?>) it.next();
            TermFrequency term = (TermFrequency) item.getValue();
            if (term.getFrequency() < threshold) {
                it.remove();
            }
        }
        if (Log.isDebugEnabled(Geonet.SEARCH_ENGINE)) {
            Log.debug(Geonet.SEARCH_ENGINE,
                    "  " + finalValuesMap.size() + "/" + size + " above threshold: " + threshold);
        }
    }

    if (Log.isDebugEnabled(Geonet.SEARCH_ENGINE)) {
        Log.debug(Geonet.SEARCH_ENGINE, "  " + finalValuesMap.size() + " returned.");
    }
    return finalValuesMap.values();
}

From source file:org.fao.geonet.kernel.search.SearchManager.java

/**
 * Browses the index for the specified Lucene field and returns the list of
 * terms found containing the search value, with their frequency.
 *
 * @param fieldName   the Lucene field name
 * @param searchValue   the value to search for; may be ""
 * @param maxNumberOfTerms   max number of term values to look at in the index. For a large
 * catalogue this value should be increased in order to get better results. If this
 * value is too high, looking for terms can take more time. The use
 * of a good analyzer should reduce the number of useless values like
 * (a, the, ...).
 * @param threshold   minimum document frequency for a term to be returned
 * @param language   language whose analyzer is used to normalise terms
 * @return   an unsorted and unordered list of terms with their frequency
 * @throws Exception if the index cannot be read
 */
public Collection<TermFrequency> getTermsFequency(String fieldName, String searchValue, int maxNumberOfTerms,
        int threshold, String language) throws Exception {
    Collection<TermFrequency> termList = new ArrayList<TermFrequency>();
    IndexAndTaxonomy indexAndTaxonomy = getNewIndexReader(null);
    // Strip Lucene wildcards ("*", "?"); matching below is a plain prefix/substring test.
    String searchValueWithoutWildcard = searchValue.replaceAll("[*?]", "");
    String analyzedSearchValue = LuceneSearcher.analyzeText(fieldName, searchValueWithoutWildcard,
            SearchManager.getAnalyzer(language, true));

    // A trailing-only wildcard ("abc*") means prefix matching; anything else is substring matching.
    boolean startsWithOnly = !searchValue.startsWith("*") && searchValue.endsWith("*");

    try {
        GeonetworkMultiReader multiReader = indexAndTaxonomy.indexReader;
        @SuppressWarnings("resource")
        SlowCompositeReaderWrapper atomicReader = new SlowCompositeReaderWrapper(multiReader);
        Terms terms = atomicReader.terms(fieldName);
        if (terms != null) {
            TermsEnum termEnum = terms.iterator(null);
            int i = 1;
            BytesRef term = termEnum.next();
            while (term != null && i++ < maxNumberOfTerms) {
                String text = term.utf8ToString();
                if (termEnum.docFreq() >= threshold) {
                    String analyzedText = LuceneSearcher.analyzeText(fieldName, text,
                            SearchManager.getAnalyzer(language, true));

                    // Match either the analyzed or the raw form of the term, as prefix or
                    // substring depending on the wildcard position in the search value.
                    if ((startsWithOnly && StringUtils.startsWithIgnoreCase(analyzedText, analyzedSearchValue))
                            || (!startsWithOnly
                                    && StringUtils.containsIgnoreCase(analyzedText, analyzedSearchValue))
                            || (startsWithOnly
                                    && StringUtils.startsWithIgnoreCase(text, searchValueWithoutWildcard))
                            || (!startsWithOnly
                                    && StringUtils.containsIgnoreCase(text, searchValueWithoutWildcard))) {
                        TermFrequency freq = new TermFrequency(text, termEnum.docFreq());
                        termList.add(freq);
                    }
                }
                term = termEnum.next();
            }
        }
    } finally {
        // Always release the reader, even if term iteration throws.
        releaseIndexReader(indexAndTaxonomy);
    }
    return termList;
}

From source file:org.hexlogic.model.DockerNode.java

/**
 * Pulls the named image from the Docker registry onto this host, following the
 * pull command's JSON progress stream to determine success or failure.
 *
 * @param imageName the image to pull (e.g. "dockerfile/nodejs")
 * @return a human-readable summary of the pull outcome
 * @throws Exception if the image was not found, the layer download failed,
 *         or any other error occurred while pulling
 */
@VsoMethod(showInApi = true, name = "pullImage", description = "Pull the image matching the given string from the docker hub repository, saving it on the docker host.")
public String pullImage(String imageName) throws Exception {
    log.debug("Pulling image '" + imageName + "'...");

    @SuppressWarnings("rawtypes")
    MappingIterator<Map> it = null;
    try {
        configureNode();
        DockerClient dockerClient = DockerClientBuilder.getInstance(config).build();
        log.debug("Starting pull operation...");

        /*
         * We will check the final result by comparing the initial image id, which is the first ID provided by the stream such as:
         * 
         * {status=Pulling image (latest) from dockerfile/nodejs, progressDetail={}, id=406eb4a4dcad}
         * 
         * to the image id of the last entity which owns id AND status which will look something like:
         * {status=Download complete, progressDetail={}, id=406eb4a4dcad}
         * 
         * If both IDs match, we know that the latest layer is the same as the requested image layer.
         * So the next step is to compare the download status of that layer
         */
        String firstId = null;
        String lastId = "";
        String lastStatus = "undefined";

        /*
         * In addition to the download status of the layer, we provide additional information about how the process went by
         * returning information to the user using the last entity which has no id and only a status, which looks like this:
         * {status=Status: Image is up to date for dockerfile/nodejs}
         * or
         * {status=Status: Downloaded newer image for dockerfile/nodejs}
         * or
         * {status=Repository dockerfile/nodejs already being pulled by another client. Waiting.}
         */
        String finalStatus = "undefined";

        // Iterate over the pull command's JSON stream, one progress entry at a time.
        for (it = new ObjectMapper().readValues(
                new JsonFactory().createJsonParser(dockerClient.pullImageCmd(imageName).exec()), Map.class); it
                        .hasNext();) {
            Map<?, ?> element = it.next();
            String id = "";
            String status = "";
            String progress = "";

            try {
                id = element.get("id").toString();
            } catch (NullPointerException e) {/* catch exception if key was not found */
            }
            try {
                status = element.get("status").toString();
            } catch (NullPointerException e) {/* catch exception if key was not found */
            }
            try {
                progress = element.get("progress").toString();
            } catch (NullPointerException e) {/* catch exception if key was not found */
            }

            // if the key was found and we got some status
            if (!id.isEmpty() && !status.isEmpty()) {
                // remember the first id of the output stream, which is the id of the image we want to pull
                if (firstId == null) {
                    log.debug("Remembering first id: " + id);
                    firstId = id;
                }

                // if the same layer is returned multiple times in a row, don't log everything but just the progress
                if (id.equals(lastId)) {
                    lastId = id;
                    lastStatus = status;
                    if (!progress.isEmpty()) {
                        log.debug("Progress: " + progress);
                    }
                } else {
                    lastId = id;
                    log.debug("Image '" + id + "' status is: " + status + ".");
                    if (!progress.isEmpty()) {
                        log.debug("Progress: " + progress);
                    }
                }
            }

            // The last status-only entry wins; it carries the overall outcome message.
            if (!status.isEmpty()) {
                finalStatus = status;
            }
        }

        // TODO find a more robust way to handle downloadStatus and finalStatus
        String downloadStatus = "undefined";
        if (lastId.equals(firstId)) {
            log.debug("Last download layer id does match the requested image id: " + firstId);
            if (StringUtils.containsIgnoreCase(lastStatus, "Download complete")) {
                downloadStatus = "successed";
                log.debug("The requested layer was downloaded successfuly.");
            } else {
                downloadStatus = "failed";
                log.error("The requested layer failed to download.");
                // throw exception in order for the workflow to fail
                throw new IllegalStateException("The requested layer failed to download.");
            }
        }

        // reload images from docker node
        this.reloadImages();
        // update inventory - another way to do this would be to update our ArrayList and call notifyElementDeleted on the image object
        notificationHandler.notifyElementInvalidate(toRef());

        log.debug("Pull operation " + downloadStatus + ". " + finalStatus + ".");
        return "Pull operation " + downloadStatus + ". " + finalStatus + ".";

    } catch (InternalServerErrorException e) {
        // the image doesn't exist on the registry
        log.error("Error: the image was not found.");
        // Throw error detail message so vCO can display it
        throw new Exception("Error: the image was not found.");
    } catch (Exception e) {
        final StringWriter sw = new StringWriter();
        final PrintWriter pw = new PrintWriter(sw, true);
        e.printStackTrace(pw);

        log.error("Error while pulling image: " + sw.getBuffer().toString());
        // Throw error detail message so vCO can display it
        throw new Exception("Error while pulling image: " + sw.getBuffer().toString());
    } finally {
        // Always close the progress stream, whatever the outcome.
        if (it != null) {
            log.debug("Closeing pullImage stream...");
            it.close();
            log.debug("Closed pullImage stream.");
        }
    }

}

From source file:org.hibernate.search.test.performance.task.QueryBooksBySummaryTask.java

/**
 * Verifies that every book returned by the query really contains the searched
 * phrase (case-insensitively) in its summary; the failure message shows the
 * phrase and the first 50 characters of the offending summary.
 */
private void assertResult(List<Book> result, String phrase) {
    for (Book found : result) {
        String summary = found.getSummary();
        String message = "QueryBooksBySummaryTask: phrase=" + phrase + ", summary="
                + StringUtils.substring(summary, 0, 50);
        assertTrue(message, StringUtils.containsIgnoreCase(summary, phrase));
    }
}

From source file:org.hippoecm.repository.util.RepoUtils.java

/**
 * Encodes the path-constraint part of an xpath query.
 *
 * Three cases are handled:
 * <ul>
 * <li>a query with a where clause ("[...]"): only the path before the clause is encoded;</li>
 * <li>a query with an " order by " but no where clause: only the path before the order-by is encoded;</li>
 * <li>a bare path: the whole string is encoded.</li>
 * </ul>
 * Anything else (e.g. an unbalanced bracket) is most likely an incorrect query
 * and is returned unchanged.
 *
 * @param xpath the xpath query to encode; must not be null
 * @return the query with its path constraint encoded, or the input unchanged
 *         when it does not match any recognised shape
 */
public static String encodeXpath(String xpath) {
    final int whereClauseIndexStart = xpath.indexOf("[");
    final int whereClauseIndexEnd = xpath.lastIndexOf("]");
    final String orderByString = " order by ";
    // Require "]" AFTER "[": with the original (> -1) check, a malformed query like
    // "]foo[" made the substring calls below throw StringIndexOutOfBoundsException
    // instead of falling through to the "incorrect query" branch.
    if (whereClauseIndexStart > -1 && whereClauseIndexEnd > whereClauseIndexStart) {
        String beforeWhere = xpath.substring(0, whereClauseIndexStart);
        String afterWhere = xpath.substring(whereClauseIndexEnd + 1, xpath.length());
        // in where clause we can have path constraints
        String whereClause = "[" + xpath.substring(whereClauseIndexStart + 1, whereClauseIndexEnd) + "]";
        return encodePathConstraint(beforeWhere) + whereClause + afterWhere;
    } else if (StringUtils.containsIgnoreCase(xpath, orderByString)) {
        int orderByIndex = StringUtils.indexOfIgnoreCase(xpath, orderByString);
        return encodePathConstraint(xpath.substring(0, orderByIndex)) + xpath.substring(orderByIndex);
    } else if (whereClauseIndexStart == -1 && whereClauseIndexEnd == -1) {
        // only path
        return encodePathConstraint(xpath);
    } else {
        // most likely incorrect query (e.g. unbalanced or reversed brackets)
        return xpath;
    }

}

From source file:org.hyperic.plugin.vrealize.automation.VRAUtils.java

/**
 * Resolves the FQDN for the given address. If the parsed address is blank or
 * refers to localhost, the local machine's FQDN is returned instead (when one
 * can be determined); otherwise the parsed address itself is returned.
 *
 * @param address the raw address to resolve
 * @return the local FQDN for blank/localhost addresses, else the parsed address
 */
public static String getFqdn(String address) {
    String parsedAddress = parseAddress(address);
    if (StringUtils.isBlank(parsedAddress) || StringUtils.containsIgnoreCase(parsedAddress, "localhost")) {
        // Call getLocalFqdn() once and reuse the result — the original called it
        // twice (check + return), doing the potentially slow lookup a second time.
        final String localFqdn = getLocalFqdn();
        if (StringUtils.isNotBlank(localFqdn)) {
            return localFqdn;
        }
    }

    return parsedAddress;
}

From source file:org.intellij.erlang.settings.ErlangExternalToolsConfigurable.java

/**
 * Runs the configured Emacs binary with "--version" (3s time limit) and shows
 * the first output line in the version label when the output mentions "emacs";
 * otherwise the label is set to "N/A".
 */
private void validateEmacsPath() {
    String output = ExtProcessUtil.restrictedTimeExec(myEmacsPathSelector.getText() + " --version", 3000);
    String[] outputLines = StringUtils.split(output, "\n");
    // containsIgnoreCase(null, ...) is false, so outputLines is never dereferenced when null.
    boolean looksLikeEmacs = StringUtils.containsIgnoreCase(output, "emacs") && outputLines.length > 0;
    myEmacsVersionText.setText(looksLikeEmacs ? ArrayUtil.getFirstElement(outputLines) : "N/A");
}

From source file:org.intermine.bio.dataconversion.ModEncodeMetaDataProcessor.java

/**
 * =====================
 *    RESULT FILES
 * =====================
 *
 * Creates a ResultFile object for each distinct file value attached to each
 * submission's applied data. Only applied data whose type contains "file" is
 * considered; a type also containing "result" gets direction "result",
 * anything else "input".
 *
 * NOTE(review): the connection parameter is not used in this method body —
 * presumably kept for symmetry with sibling create* methods; confirm.
 *
 * @param connection the database connection (unused here)
 * @throws ObjectStoreException if storing a created result file fails
 */
private void createResultFiles(Connection connection) throws ObjectStoreException {
    long bT = System.currentTimeMillis(); // to monitor time spent in the process

    for (Integer submissionId : submissionDataMap.keySet()) {
        // the applied data is repeated for each protocol,
        // so de-duplicate the files created per submission
        Set<String> subFiles = new HashSet<String>();
        for (Integer dataId : submissionDataMap.get(submissionId)) {
            AppliedData ad = appliedDataMap.get(dataId);
            // now checking only for 'file', not 'result file'
            if (StringUtils.containsIgnoreCase(ad.type, "file")) {
                if (!StringUtils.isBlank(ad.value) && !subFiles.contains(ad.value)) {
                    String direction = null;

                    if (StringUtils.containsIgnoreCase(ad.type, "result")) {
                        direction = "result";
                    } else {
                        direction = "input";
                    }
                    createResultFile(ad.value, ad.name, ad.url, direction, submissionId);
                    subFiles.add(ad.value);
                }
            }
        }
    }
    LOG.info("TIME creating ResultFile objects: " + (System.currentTimeMillis() - bT) + " ms");
}

From source file:org.isatools.isatab.export.isatab.pipeline.wrapper_nodes.GraphElementWrapperNode.java

/**
 * Tells if the target string contains at least one of the matches
 * (case-insensitively). This is often used to detect which type of
 * material/data one has.
 * TODO: Was moved to {@link StringSearchUtils}, replace it.
 *
 * @param target  the string to search in; must not be null
 * @param matches the candidate substrings; must be non-null and non-empty
 * @return true if target contains any of the matches, ignoring case
 * @throws TabInternalErrorException if target is null, or matches is null/empty
 */
protected static boolean containsOne(String target, String... matches) {
    if (target == null) {
        throw new TabInternalErrorException("containsOne(): target is null!");
    }
    if (matches == null || matches.length == 0) {
        // Fixed copy-pasted message: this branch rejects a missing/empty
        // matches array, not a null target.
        throw new TabInternalErrorException("containsOne(): matches is null or empty!");
    }
    for (String match : matches) {
        if (StringUtils.containsIgnoreCase(target, match)) {
            return true;
        }
    }
    return false;
}