Example usage for java.lang.String.CASE_INSENSITIVE_ORDER

Introduction

On this page you can find example usages of java.lang.String.CASE_INSENSITIVE_ORDER, collected from open-source projects.

Prototype

public static final Comparator<String> CASE_INSENSITIVE_ORDER

Document

A Comparator that orders String objects as by compareToIgnoreCase.
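
As a quick, self-contained sketch of that behavior (the class and variable names here are illustrative, not taken from the examples below):

import java.util.Arrays;
import java.util.List;
import java.util.TreeSet;

public class CaseInsensitiveOrderDemo {
    public static void main(String[] args) {
        // Sorting with the comparator ignores case, just as compareToIgnoreCase would.
        List<String> words = Arrays.asList("banana", "Apple", "cherry");
        words.sort(String.CASE_INSENSITIVE_ORDER);
        System.out.println(words); // [Apple, banana, cherry]

        // Strings differing only in case compare as equal, so a TreeSet (or
        // TreeMap) built with the comparator keeps a single entry for them.
        TreeSet<String> keys = new TreeSet<>(String.CASE_INSENSITIVE_ORDER);
        keys.add("Content-Type");
        keys.add("content-type");
        System.out.println(keys.size()); // 1
    }
}

The examples below rely on exactly these two properties: case-insensitive sorting of name lists, and case-insensitive keyed collections built on TreeMap or TreeSet.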

Usage

From source file:org.apache.falcon.resource.extensions.ExtensionManager.java

@GET
@Path("list/{extension-name}")
@Produces({ MediaType.TEXT_XML, MediaType.APPLICATION_JSON })
public ExtensionJobList getExtensionJobs(@PathParam("extension-name") String extensionName,
        @DefaultValue("") @QueryParam("fields") String fields,
        @DefaultValue(ASCENDING_SORT_ORDER) @QueryParam("sortOrder") String sortOrder,
        @DefaultValue("0") @QueryParam("offset") Integer offset,
        @QueryParam("numResults") Integer resultsPerPage,
        @DefaultValue("") @QueryParam("doAs") String doAsUser) {
    checkIfExtensionServiceIsEnabled();
    resultsPerPage = resultsPerPage == null ? getDefaultResultsPerPage() : resultsPerPage;
    try {
        // get filtered entities
        List<Entity> entities = getEntityList("", "", "", TAG_PREFIX_EXTENSION_NAME + extensionName, "",
                doAsUser);
        if (entities.isEmpty()) {
            return new ExtensionJobList(0);
        }

        // group entities by extension job name
        Map<String, List<Entity>> groupedEntities = groupEntitiesByJob(entities);

        // sort by extension job name
        List<String> jobNames = new ArrayList<>(groupedEntities.keySet());
        switch (sortOrder.toLowerCase()) {
        case DESCENDING_SORT_ORDER:
            Collections.sort(jobNames, Collections.reverseOrder(String.CASE_INSENSITIVE_ORDER));
            break;
        default:
            Collections.sort(jobNames, String.CASE_INSENSITIVE_ORDER);
        }

        // pagination and format output
        int pageCount = getRequiredNumberOfResults(jobNames.size(), offset, resultsPerPage);
        HashSet<String> fieldSet = new HashSet<>(Arrays.asList(fields.toUpperCase().split(",")));
        ExtensionJobList jobList = new ExtensionJobList(pageCount);
        for (int i = offset; i < offset + pageCount; i++) {
            String jobName = jobNames.get(i);
            List<Entity> jobEntities = groupedEntities.get(jobName);
            EntityList entityList = new EntityList(buildEntityElements(fieldSet, jobEntities),
                    jobEntities.size());
            jobList.addJob(new ExtensionJobList.JobElement(jobName, entityList));
        }
        return jobList;
    } catch (FalconException | IOException e) {
        LOG.error("Failed to get extension job list of " + extensionName + ": ", e);
        throw FalconWebException.newAPIException(e, Response.Status.INTERNAL_SERVER_ERROR);
    }
}

From source file:org.structr.websocket.command.WebappDataCommand.java

public static List<String> listValues(final String category) {

    final File baseDir = new File(Paths.get(getValuesPath(), category).toString());
    final List<String> fileNames = new LinkedList<>();

    if (baseDir.exists()) {

        final String[] names = baseDir.list();
        if (names != null) {

            fileNames.addAll(Arrays.asList(names));
        }
    }

    Collections.sort(fileNames, String.CASE_INSENSITIVE_ORDER);

    return fileNames;
}

From source file:annis.gui.flatquerybuilder.SpanBox.java

@Override
public void textChange(FieldEvents.TextChangeEvent event) {
    String txt = event.getText();
    HashMap<Integer, Collection<String>> levdistvals = new HashMap<>();
    if (txt.length() > 1) {
        cb.removeAllItems();
        for (String s : annonames) {
            Integer d = StringUtils.getLevenshteinDistance(removeAccents(txt), removeAccents(s));
            if (levdistvals.containsKey(d)) {
                levdistvals.get(d).add(s);
            } else {
                Set<String> newc = new TreeSet<String>();
                newc.add(s);
                levdistvals.put(d, newc);
            }
        }
        SortedSet<Integer> keys = new TreeSet<Integer>(levdistvals.keySet());
        for (Integer k : keys.subSet(0, 5)) {
            List<String> values = new ArrayList<>(levdistvals.get(k));
            Collections.sort(values, String.CASE_INSENSITIVE_ORDER);
            for (String v : values) {
                cb.addItem(v);
            }
        }
    }
}

From source file:cn.ctyun.amazonaws.auth.AWS4Signer.java

protected String getCanonicalizedHeaderString(Request<?> request) {
    List<String> sortedHeaders = new ArrayList<String>();
    sortedHeaders.addAll(request.getHeaders().keySet());
    Collections.sort(sortedHeaders, String.CASE_INSENSITIVE_ORDER);

    StringBuilder buffer = new StringBuilder();
    for (String header : sortedHeaders) {
        buffer.append(header.toLowerCase().replaceAll("\\s+", " ") + ":"
                + request.getHeaders().get(header).replaceAll("\\s+", " "));
        buffer.append("\n");
    }

    return buffer.toString();
}

From source file:com.stimulus.archiva.presentation.ConfigBean.java

public List<String> getRuleFields() {
    ArrayList<String> list = new ArrayList<String>();
    EmailFields emailFields = Config.getConfig().getEmailFields();
    for (EmailField ef : emailFields.getAvailableFields().values()) {
        if (!Compare.equalsIgnoreCase(ef.getName(), "body")
                && !Compare.equalsIgnoreCase(ef.getName(), "attachments")) {
            list.add(ef.getName());
        }
    }
    list.add("addresses");
    Collections.sort(list, String.CASE_INSENSITIVE_ORDER);
    return list;
}

From source file:com.pearson.pdn.learningstudio.oauth.OAuth1SignatureService.java

/**
 * Normalizes all OAuth signable parameters and url query parameters
 * according to OAuth 1.0
 * 
 * @param httpMethod
 *            The upper-cased HTTP method
 * @param url
 *            The request URL
 * @param oauthParams
 *            The associative set of signable oAuth parameters
 * @param requestBody
 *            The serialized POST/PUT message body
 * 
 * @return A string containing normalized and encoded oAuth parameters
 * 
 * @throws UnsupportedEncodingException
 */
private String normalizeParams(String httpMethod, URL url, Map<String, String> oauthParams, byte[] requestBody)
        throws UnsupportedEncodingException {

    // Sort the parameters case-insensitively by key (the TreeMap keeps them ordered)
    Map<String, String> kvpParams = new TreeMap<String, String>(String.CASE_INSENSITIVE_ORDER);
    kvpParams.putAll(oauthParams);

    // Put any query string parameters into the map, splitting each pair on
    // "="; the pairs are later joined with "&" below
    if (url.getQuery() != null) {
        for (String keyValue : url.getQuery().split("&")) {
            String[] p = keyValue.split("=");
            // Guard against parameters that have no value (e.g. "?flag")
            kvpParams.put(p[0], p.length > 1 ? p[1] : "");
        }

    }

    // Include the body parameter if dealing with a POST or PUT request
    if ("POST".equals(httpMethod) || "PUT".equals(httpMethod)) {
        String body = Base64.encodeBase64String(requestBody).replaceAll("\r\n", "");
        // url encode the body 2 times now before combining other params
        body = URLEncoder.encode(body, "UTF-8");
        body = URLEncoder.encode(body, "UTF-8");
        kvpParams.put("body", body);
    }

    // separate the key and values with a "="
    // separate the kvp with a "&"
    StringBuilder combinedParams = new StringBuilder();
    String delimiter = "";
    for (String key : kvpParams.keySet()) {
        combinedParams.append(delimiter);
        combinedParams.append(key);
        combinedParams.append("=");
        combinedParams.append(kvpParams.get(key));
        delimiter = "&";
    }

    // url encode the entire string again before returning
    return URLEncoder.encode(combinedParams.toString(), "UTF-8");
}

From source file:org.structr.websocket.command.LayoutsCommand.java

public static List<String> listLayouts() {

    final File baseDir = new File(getBasePath());
    final List<String> fileNames = new LinkedList<>();

    if (baseDir.exists()) {

        final String[] names = baseDir.list();
        if (names != null) {

            fileNames.addAll(Arrays.asList(names));
        }
    }

    Collections.sort(fileNames, String.CASE_INSENSITIVE_ORDER);

    return fileNames;
}

From source file:nz.govt.natlib.adapter.arc.ArcAdapter.java

public void adapt(File file, ParserContext ctx) throws IOException {
    ArcReader arcReader = null;
    try {
        // Get the reader (either compressed or uncompressed)
        arcReader = getArcReader(file);
        // Get an iterator over the arc records
        Iterator<ArcRecordBase> iter = arcReader.iterator();
        // Reference to the first record which is the "archive metadata record"
        ArcMetadata arcMetadata = null;
        // Map to hold the mime type statistics
        HashMap<String, Integer> mimeMap = new HashMap<>();
        // Iterate over the arc records
        while (iter != null && iter.hasNext()) {
            ArcRecordBase record = iter.next();
            // First record is the Archive Metadata record. Get hold of its reference
            if (record instanceof ArcVersionBlock) {
                ArcVersionBlock arcMetadataRecord = (ArcVersionBlock) record;
                // Extract the metadata from the XML data that this arc record holds
                arcMetadata = parseArcMetadataRecord(arcMetadataRecord);
            }
            addMimeTypeToMimeMap(record, mimeMap);
            record.close();
        }
        ctx.fireStartParseEvent("ARC");
        writeFileInfo(file, ctx);

        // Write the <ARCMETADATA> element
        if (arcMetadata != null) {
            ctx.fireStartParseEvent("ARCMETADATA");
            ctx.fireParseEvent("SOFTWARE", arcMetadata.software);
            ctx.fireParseEvent("HOSTNAME", arcMetadata.hostname);
            ctx.fireParseEvent("IP", arcMetadata.ip);
            ctx.fireParseEvent("OPERATOR", arcMetadata.operator);
            ctx.fireParseEvent("CREATEDDATE", arcMetadata.createdDate);
            ctx.fireParseEvent("ROBOTPOLICY", arcMetadata.robotPolicy);
            ctx.fireParseEvent("ARCFORMAT", arcMetadata.arcFormat);
            ctx.fireParseEvent("CONFORMSTO", arcMetadata.conformsTo);
            ctx.fireEndParseEvent("ARCMETADATA");
        }

        // Write the <ARCINFO> element
        ctx.fireStartParseEvent("ARCINFO");
        ctx.fireParseEvent("COMPRESSED", arcReader.isCompressed());

        ctx.fireStartParseEvent("CONTENTSUMMARY");

        if (mimeMap.size() > 0) {
            Set<String> keys = new TreeSet<String>(String.CASE_INSENSITIVE_ORDER);
            keys.addAll(mimeMap.keySet());
            Iterator<String> keyIterator = keys.iterator();
            StringBuilder mimeSummary = new StringBuilder();
            boolean first = true;
            while (keyIterator.hasNext()) {
                String mimetype = keyIterator.next();
                if (!first) {
                    mimeSummary.append(", ");
                }
                first = false;
                mimeSummary.append(mimetype).append(":").append(mimeMap.get(mimetype));
            }
            ctx.fireParseEvent("MIMEREPORT", mimeSummary.toString());
        }
        ctx.fireEndParseEvent("CONTENTSUMMARY");
        ctx.fireEndParseEvent("ARCINFO");
        ctx.fireEndParseEvent("ARC");
    } catch (Throwable ex) {
        System.out.println("Exception: " + ex);
        ex.printStackTrace();
    } finally {
        if (arcReader != null)
            arcReader.close();
    }
}

From source file:org.eumetsat.usd.gcp.server.data.NetCDFCalibrationDataManager.java

/**
 * {@inheritDoc}
 */
@Override
public void addDataFromDatasetForUser(String userID, String datasetURL, String channelName, double userSceneTb)
        throws DatasetReadException, InvalidFormatException, InvalidFilenameException {
    // Download file to speed reading up.
    NetcdfFile ncfile = NetcdfUtils.downloadAndOpenFile(datasetURL);

    // Extract the conversion formulas.
    // Case-insensitive set of the variable names used in the conversion formulas.
    Set<String> convVarsNames = new TreeSet<String>(String.CASE_INSENSITIVE_ORDER);

    GlobalAttributesNames globalAttrNames = configManager.getGlobalAttributesNames();

    Attribute radToTbFormulaAttr = ncfile
            .findGlobalAttributeIgnoreCase(globalAttrNames.getRadToTbConvFormula());
    Attribute tbToRadFormulaAttr = ncfile
            .findGlobalAttributeIgnoreCase(globalAttrNames.getTbToRadConvFormula());

    String radToTbConvFormula = configManager.getGlobalAttributesDefaults().getRadToTbConvFormula(); // default.
    if (radToTbFormulaAttr != null) {
        try {
            radToTbConvFormula = processRadianceToTbFormula(radToTbFormulaAttr.getStringValue().split("=")[1],
                    convVarsNames);

        } catch (FormulaException fe) {
            LOGGER.warn("invalid radiance to tb conversion formula in <" + FilenameUtils.getName(datasetURL)
                    + ">. Using default.", fe);
        }
    } else {
        LOGGER.warn("radiance to tb conversion formula not found. Using default.");
    }

    String tbToRadConvFormula = configManager.getGlobalAttributesDefaults().getTbToRadConvFormula(); // default.
    if (tbToRadFormulaAttr != null) {
        try {
            tbToRadConvFormula = processTbToRadianceFormula(tbToRadFormulaAttr.getStringValue().split("=")[1],
                    convVarsNames);

        } catch (FormulaException fe) {
            LOGGER.warn("invalid tb to radiance conversion formula in <" + FilenameUtils.getName(datasetURL)
                    + ">. Using default.", fe);
        }
    } else {
        LOGGER.warn("tb to radiance conversion formula not found. Using default.");
    }

    // Extract the variables from the NetCDF file -------------------
    try {
        VariablesNames varNames = configManager.getVariablesNames();

        // Get list of channel indexes to retrieve (All channels or single channel)
        Array channelNames = NetcdfUtils.readVariable(ncfile, varNames.getChannelName());

        int firstChannelNum = 0;
        int lastChannelNum = 0;

        if (channelName.equalsIgnoreCase("All")) {
            firstChannelNum = 0;
            lastChannelNum = NetcdfUtils.getNumRowsOf(channelNames) - 1;

        } else {
            firstChannelNum = NetcdfUtils.getIndexOf(channelName, channelNames);

            if (firstChannelNum == -1) {
                throw new InvalidFormatException("'" + channelName + "' not found in the NetCDF file <"
                        + FilenameUtils.getName(datasetURL) + ">.");
            }

            lastChannelNum = firstChannelNum;
        }

        for (int channelNum = firstChannelNum; channelNum <= lastChannelNum; channelNum++) {
            // Get data array from the netCDF file.
            Array dateArray = NetcdfUtils.readVariable(ncfile, varNames.getDate());
            Array offsetArray = NetcdfUtils.readVariable(ncfile, varNames.getOffset(), channelNum);
            Array slopeArray = NetcdfUtils.readVariable(ncfile, varNames.getSlope(), channelNum);
            Array offsetSeArray = NetcdfUtils.readVariable(ncfile, varNames.getOffsetSe(), channelNum);
            Array slopeSeArray = NetcdfUtils.readVariable(ncfile, varNames.getSlopeSe(), channelNum);

            // Flag with Double.POSITIVE_INFINITY if slope is equal to 0.
            for (int i = 0; i < slopeArray.getSize(); i++) {
                if (Double.compare(slopeArray.getDouble(i), 0) == 0) {
                    slopeArray.setDouble(i, Double.POSITIVE_INFINITY);
                }
            }

            // TODO: check if this workaround to support inconsistent datasets can be removed.
            Array covarianceArray = null;
            try {
                covarianceArray = NetcdfUtils.readVariable(ncfile, varNames.getCovariance(), channelNum);

            } catch (VariableNotFoundException vnfe) {
                try {
                    covarianceArray = NetcdfUtils.readVariable(ncfile, "covar_of_offset_and_slope", channelNum);

                } catch (VariableNotFoundException vnfe2) {
                    covarianceArray = NetcdfUtils.readVariable(ncfile, "covar", channelNum);
                }
            }

            // Get stdSceneTb if not defined.
            double sceneTb = 0.0;

            if (Double.compare(userSceneTb, -1.0) == 0) {
                sceneTb = NetcdfUtils.readVariable(ncfile, varNames.getStdSceneTb()).getDouble(channelNum);

            } else {
                sceneTb = userSceneTb;
            }

            NetcdfFilename ncfilename = NetcdfFilename.parse(FilenameUtils.getName(datasetURL));
            String currentChannelName = NetcdfUtils.getStringFrom(channelNum, channelNames);

            // Format timestamp.
            String timestamp = DateUtils.format(
                    DateUtils.parse(ncfilename.getTimestamp(), "yyyyMMddHHmmss", "GMT"), "yyyy/MM/dd HH:mm:ss",
                    "GMT");

            // Construct the dataset name.
            String datasetName = ncfilename.getSatellite() + "/" + ncfilename.getInstrument()
                    + " referenced with " + ncfilename.getRefSatellite() + "/" + ncfilename.getRefInstrument()
                    + " [" + ncfilename.getLocationIndication().split("-")[1] + "]["
                    + ncfilename.getCorrectionType() + "][" + ncfilename.getMode() + "][" + timestamp + "][v"
                    + ncfilename.getVersion() + "][" + currentChannelName + "][" + sceneTb + "K]";

            // Add new records.
            addCalibrationRecords(userID, ncfile, datasetName, datasetURL, dateArray, offsetArray,
                    offsetSeArray, slopeArray, slopeSeArray, covarianceArray, channelNum, sceneTb,
                    radToTbConvFormula, tbToRadConvFormula, convVarsNames);
        }

    } catch (BadArgumentException bae) {
        throw new InvalidFormatException(
                "Format of NetCDF file <" + FilenameUtils.getName(datasetURL) + "> is invalid.", bae);

    } catch (ParseException pe) {
        throw new InvalidFormatException("Timestamp with invalid format.", pe);

    } catch (VariableNotFoundException vnfe) {
        throw new InvalidFormatException("Variable '" + vnfe.getVariableName() + "' not found in NetCDF file <"
                + FilenameUtils.getName(datasetURL) + ">.", vnfe);

    } catch (VariableReadException vre) {
        throw new InvalidFormatException("Variable '" + vre.getVariableName() + "' not found in NetCDF file <"
                + FilenameUtils.getName(datasetURL) + ">.", vre);

    } catch (ChannelNotFoundException cnfe) {
        throw new InvalidFormatException("Channel number '" + cnfe.getChannelNum() + "' not found in variable '"
                + cnfe.getVariableName() + "' in NetCDF file <" + FilenameUtils.getName(datasetURL) + ">.",
                cnfe);

    } finally {
        // Clean-up
        if (ncfile != null) {
            try {
                ncfile.close();

            } catch (IOException ioe) {
                LOGGER.error("trying to close the NetcdfFile", ioe);
            }
        }
    }

}

From source file:org.apache.kylin.rest.util.ValidateUtil.java

private Set<String> getAllColumns(String project, String table) throws IOException {
    List<TableDesc> tableDescByProject = tableService.getTableDescByProject(project, true);
    Set<String> cols = new TreeSet<>(String.CASE_INSENSITIVE_ORDER);

    for (TableDesc tableDesc : tableDescByProject) {
        String tbl = tableDesc.getDatabase() + "." + tableDesc.getName();
        if (tbl.equalsIgnoreCase(table)) {
            for (ColumnDesc column : tableDesc.getColumns()) {
                cols.add(column.getName());
            }
            break;
        }
    }
    return cols;
}