Example usage for java.nio.charset Charset equals

List of usage examples for java.nio.charset Charset equals

Introduction

In this page you can find the example usage for java.nio.charset Charset equals.

Prototype

public final boolean equals(Object ob) 

Source Link

Document

Tells whether or not this object is equal to another.

Usage

From source file: Main.java

public static void main(String[] args) {
    // Two Charset instances looked up under the same canonical name are equal.
    Charset first = Charset.forName("UTF-8");
    Charset second = Charset.forName("UTF-8");
    boolean sameCharset = first.equals(second);
    System.out.println(sameCharset);
}

From source file: Main.java

public static void main(String[] args) {
    // "UTF16" resolves (via alias) to UTF-16, which is a different charset than UTF-8,
    // so the comparison prints false.
    Charset utf16 = Charset.forName("UTF16");
    Charset utf8 = Charset.forName("UTF-8");
    System.out.println(utf16.equals(utf8));
}

From source file: org.gradle.api.internal.PropertiesUtils.java

/**
 * Writes a {@link java.util.Properties} in a way that the results can be expected to be reproducible.
 *
 * <p>There are a number of differences compared to {@link java.util.Properties#store(java.io.Writer, String)}:</p>
 * <ul>
 *     <li>no timestamp comment is generated at the beginning of the file</li>
 *     <li>the lines in the resulting files are separated by a pre-set separator (defaults to
 *         {@literal '\n'}) instead of the system default line separator</li>
 *     <li>the properties are sorted alphabetically</li>
 * </ul>
 *
 * <p>Like with {@link java.util.Properties#store(java.io.OutputStream, String)}, Unicode characters are
 * escaped when using the default Latin-1 (ISO-8859-1) encoding.</p>
 *
 * @param properties    the properties to serialize
 * @param outputStream  destination the encoded bytes are written to
 * @param comment       optional comment emitted at the top of the output
 * @param charset       target encoding for the result
 * @param lineSeparator separator placed between output lines
 * @throws IOException if serializing or writing fails
 */
public static void store(Properties properties, OutputStream outputStream, String comment, Charset charset,
        String lineSeparator) throws IOException {
    String rawContents;
    // Properties.store(OutputStream, ...) escapes non-Latin-1 characters while the
    // Writer overload does not, so pick the overload matching the requested charset.
    if (charset.equals(Charsets.ISO_8859_1)) {
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        properties.store(out, comment);
        rawContents = new String(out.toByteArray(), Charsets.ISO_8859_1);
    } else {
        StringWriter out = new StringWriter();
        properties.store(out, comment);
        rawContents = out.toString();
    }

    // Properties.store always uses the platform line separator, so split on that.
    String systemLineSeparator = SystemProperties.getInstance().getLineSeparator();
    List<String> lines = Lists
            .newArrayList(Splitter.on(systemLineSeparator).omitEmptyStrings().split(rawContents));
    // Locate the last '#' comment line: Properties.store emits the timestamp as the
    // final comment before the property lines.
    int lastCommentLine = -1;
    for (int lineNo = 0, len = lines.size(); lineNo < len; lineNo++) {
        String line = lines.get(lineNo);
        if (line.startsWith("#")) {
            lastCommentLine = lineNo;
        }
    }

    // The last comment line is the timestamp; drop it so output is reproducible.
    List<String> nonCommentLines;
    if (lastCommentLine != -1) {
        lines.remove(lastCommentLine);
        // After the removal the former property lines start at index lastCommentLine.
        nonCommentLines = lines.subList(lastCommentLine, lines.size());
    } else {
        nonCommentLines = lines;
    }

    // Sorting the subList view reorders the tail of 'lines' in place.
    Collections.sort(nonCommentLines);
    String contents = Joiner.on(lineSeparator).join(lines);
    outputStream.write(contents.getBytes(charset));
}

From source file: org.gradle.internal.util.PropertiesUtils.java

/**
 * Writes {@link java.util.Properties} in a way that the results can be expected to be reproducible.
 *
 * <p>There are a number of differences compared to {@link java.util.Properties#store(java.io.Writer, String)}:</p>
 * <ul>
 *     <li>no timestamp comment is generated at the beginning of the file</li>
 *     <li>the lines in the resulting files are separated by a pre-set separator instead of the system default line separator</li>
 *     <li>the properties are sorted alphabetically</li>
 * </ul>
 *
 * <p>Like with {@link java.util.Properties#store(java.io.OutputStream, String)}, Unicode characters are
 * escaped when using the default Latin-1 (ISO-8859-1) encoding.</p>
 *
 * <p>Unlike the variant in {@code org.gradle.api.internal.PropertiesUtils}, this
 * implementation appends a trailing line separator after the last line.</p>
 *
 * @param properties    the properties to serialize
 * @param outputStream  destination the encoded bytes are written to
 * @param comment       optional comment emitted at the top of the output; may be null
 * @param charset       target encoding for the result
 * @param lineSeparator separator appended after every output line
 * @throws IOException if serializing or writing fails
 */
public static void store(Properties properties, OutputStream outputStream, @Nullable String comment,
        Charset charset, String lineSeparator) throws IOException {
    String rawContents;
    // Properties.store(OutputStream, ...) escapes non-Latin-1 characters while the
    // Writer overload does not, so pick the overload matching the requested charset.
    if (charset.equals(Charsets.ISO_8859_1)) {
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        properties.store(out, comment);
        rawContents = new String(out.toByteArray(), Charsets.ISO_8859_1);
    } else {
        StringWriter out = new StringWriter();
        properties.store(out, comment);
        rawContents = out.toString();
    }

    // Properties.store always uses the platform line separator, so split on that.
    String systemLineSeparator = SystemProperties.getInstance().getLineSeparator();
    List<String> lines = Lists
            .newArrayList(Splitter.on(systemLineSeparator).omitEmptyStrings().split(rawContents));
    // Locate the last '#' comment line: Properties.store emits the timestamp as the
    // final comment before the property lines.
    int lastCommentLine = -1;
    for (int lineNo = 0, len = lines.size(); lineNo < len; lineNo++) {
        String line = lines.get(lineNo);
        if (line.startsWith("#")) {
            lastCommentLine = lineNo;
        }
    }

    // The last comment line is the timestamp; drop it so output is reproducible.
    List<String> nonCommentLines;
    if (lastCommentLine != -1) {
        lines.remove(lastCommentLine);
        // After the removal the former property lines start at index lastCommentLine.
        nonCommentLines = lines.subList(lastCommentLine, lines.size());
    } else {
        nonCommentLines = lines;
    }

    // Sorting the subList view reorders the tail of 'lines' in place.
    Collections.sort(nonCommentLines);
    StringBuilder builder = new StringBuilder();
    for (String line : lines) {
        builder.append(line);
        builder.append(lineSeparator);
    }
    outputStream.write(builder.toString().getBytes(charset));
}

From source file: jp.opencollector.guacamole.auth.delegated.DelegatedAuthenticationProvider.java

/**
 * Creates a Jackson {@link JsonParser} for the given stream: a byte-level
 * UTF-8 parser when the charset is UTF-8, otherwise a reader-based parser
 * that decodes through an {@link InputStreamReader}.
 */
private static JsonParser createJsonParser(InputStream is, Charset charset, ObjectCodec codec) {
    final IOContext ctxt = new IOContext(new BufferRecycler(), is, false);
    final int canonicalizeMask = JsonFactory.Feature.CANONICALIZE_FIELD_NAMES.getMask();
    if (!charset.equals(UTF_8)) {
        // Non-UTF-8 input: decode characters first, then parse.
        return new ReaderBasedJsonParser(ctxt, 0, new InputStreamReader(is, charset), codec,
                symbolCanonicalizer.makeChild(canonicalizeMask));
    }
    // UTF-8 input: parse the raw bytes directly.
    final byte[] inputBuffer = ctxt.allocReadIOBuffer();
    return new UTF8StreamJsonParser(ctxt, 0, is, codec,
            byteSymbolCanonicalizer.makeChild(canonicalizeMask), inputBuffer, 0, 0, true);
}

From source file: com.microsoft.azure.management.datalake.store.uploader.StringExtensions.java

/**
 * Finds the index in the given buffer of a newline character, either the first or the last (based on the parameters).
 * If a combined newline (\r\n), the index returned is that of the last character in the sequence.
 *
 * @param buffer The buffer to search in.
 * @param startOffset The index of the first byte to start searching at.
 * @param length The number of bytes to search, starting from the given startOffset.
 * @param reverse If true, searches from the startOffset down to the beginning of the buffer. If false, searches upwards.
 * @param encoding Indicates the type of encoding to use for the buffered bytes. Only UTF-8, UTF-16, UTF-16BE, UTF-16LE and US-ASCII are supported.
 * @param delimiter Optionally indicates the delimiter to consider as the "new line", which MUST BE a single character. If null, the default is '\\r', '\\n' and '\\r\\n'.
 * @return The index of the closest newline character in the sequence (based on direction) that was found. Returns -1 if not found.
 * @throws IllegalArgumentException if the encoding is unsupported, the delimiter is longer than one character, or length is shorter than one character.
 * @throws IndexOutOfBoundsException if the offsets fall outside the buffer.
 */
public static int findNewline(byte[] buffer, int startOffset, int length, boolean reverse, Charset encoding,
        String delimiter) {
    if (buffer.length == 0 || length == 0) {
        return -1;
    }

    // determine how many bytes each character occupies in the given encoding
    int bytesPerChar;
    if (encoding.equals(StandardCharsets.UTF_16) || encoding.equals(StandardCharsets.UTF_16BE)
            || encoding.equals(StandardCharsets.UTF_16LE)) {
        bytesPerChar = 2;
    } else if (encoding.equals(StandardCharsets.US_ASCII) || encoding.equals(StandardCharsets.UTF_8)) {
        bytesPerChar = 1;
    } else {
        throw new IllegalArgumentException(
                "Only the following encodings are allowed: UTF-8, UTF-16, UTF-16BE, UTF-16LE and ASCII");
    }

    if (delimiter != null && !StringUtils.isEmpty(delimiter) && delimiter.length() > 1) {
        throw new IllegalArgumentException(
                "The delimiter must only be a single character or unspecified to represent the CRLF delimiter");
    }

    if (delimiter != null && !StringUtils.isEmpty(delimiter)) {
        // convert the byte array back to a String
        int startOfSegment = reverse ? startOffset - length + 1 : startOffset;
        String bytesToString = new String(buffer, startOfSegment, length, encoding);
        if (!bytesToString.contains(delimiter)) {
            // didn't find the delimiter.
            return -1;
        }

        // the index is returned, which is 0 based, so our loop must include the zero case.
        int numCharsToDelim = reverse ? bytesToString.lastIndexOf(delimiter) : bytesToString.indexOf(delimiter);
        int toReturn = 0;
        for (int i = 0; i <= numCharsToDelim; i++) {
            // FIX: bytesToString already starts at buffer[startOfSegment], so it must be
            // indexed from 0. The previous code used charAt(startOfSegment + i), which read
            // the wrong characters (or threw StringIndexOutOfBoundsException) whenever
            // startOfSegment > 0.
            toReturn += Character.toString(bytesToString.charAt(i)).getBytes(encoding).length;
        }

        // we get the total number of bytes, but we want to return the index (which starts at 0)
        // so we subtract 1 from the total number of bytes to get the final byte index.
        // NOTE(review): this index is relative to startOfSegment, while the default-delimiter
        // path below returns an absolute buffer index — confirm against callers.
        return toReturn - 1;
    }

    //endOffset is a 'sentinel' value; we use that to figure out when to stop searching
    int endOffset = reverse ? startOffset - length : startOffset + length;

    // if we are starting at the end, we need to move toward the front enough to grab the right number of bytes
    startOffset = reverse ? startOffset - (bytesPerChar - 1) : startOffset;

    if (startOffset < 0 || startOffset >= buffer.length) {
        throw new IndexOutOfBoundsException(
                "Given start offset is outside the bounds of the given buffer. In reverse cases, the start offset is modified to ensure we check the full size of the last character");
    }

    // make sure that the length we are traversing is at least as long as a single character
    if (length < bytesPerChar) {
        throw new IllegalArgumentException(
                "length must be at least as long as the length, in bytes, of a single character");
    }

    if (endOffset < -1 || endOffset > buffer.length) {
        throw new IndexOutOfBoundsException(
                "Given combination of startOffset and length would execute the search outside the bounds of the given buffer.");
    }

    // scan one byte at a time, decoding bytesPerChar bytes at each position
    int bufferEndOffset = reverse ? startOffset : startOffset + length;
    int result = -1;
    for (int charPos = startOffset; reverse ? charPos != endOffset
            : charPos + bytesPerChar - 1 < endOffset; charPos = reverse ? charPos - 1 : charPos + 1) {
        char c;
        if (bytesPerChar == 1) {
            c = (char) buffer[charPos];
        } else {
            String temp = new String(buffer, charPos, bytesPerChar, encoding);
            if (StringUtils.isEmpty(temp)) {
                // invalid code unit for this encoding at this offset; keep scanning
                continue;
            } else {
                c = temp.toCharArray()[0];
            }
        }

        if (isNewline(c, delimiter)) {
            result = charPos + bytesPerChar - 1;
            break;
        }
    }

    // forward default-delimiter search: if we landed on '\r', check whether the next
    // character is '\n' so the combined CRLF is treated as one newline
    if ((delimiter == null || StringUtils.isEmpty(delimiter)) && !reverse
            && result < bufferEndOffset - bytesPerChar) {
        char c;
        if (bytesPerChar == 1) {
            c = (char) buffer[result + bytesPerChar];
        } else {
            String temp = new String(buffer, result + 1, bytesPerChar, encoding);
            if (StringUtils.isEmpty(temp)) {
                // this can occur if the number of bytes for characters in the string result in an empty string (an invalid code for the given encoding)
                // in this case, that means that we are done for the default delimiter.
                return result;
            } else {
                c = temp.toCharArray()[0];
            }
        }

        if (isNewline(c, delimiter)) {
            //we originally landed on a \r character; if we have a \r\n character, advance one position to include that
            result += bytesPerChar;
        }
    }

    return result;
}

From source file: org.apache.tika.parser.html.charsetdetector.charsets.XUserDefinedCharset.java

@Override
public boolean contains(Charset cs) {
    // This charset is declared to contain only US-ASCII. Charset equality is
    // symmetric (name-based), so reversing the comparison is behaviorally identical.
    return StandardCharsets.US_ASCII.equals(cs);
}

From source file: org.apache.hadoop.hive.ql.udf.UDFConvert.java

/**
 * Hive UDF entry point: converts {@code str} between the charsets named by
 * {@code dset} (destination) and {@code sset} (source).
 *
 * <p>Only the charset pair held in the fields {@code ori_1}/{@code ori_2}
 * (declared elsewhere in this class — presumably GBK and UTF-8 given the
 * literals below; verify against the field initializers) is supported; any
 * other pair yields {@code null}.</p>
 *
 * @param str  the text to convert; may be null
 * @param dset name of the destination charset; may be null
 * @param sset name of the source charset; may be null
 * @return the converted text stored in the shared {@code result} field, or
 *         null when any argument is null, conversion fails, or the charset
 *         pair is unsupported
 */
public Text evaluate(Text str, Text dset, Text sset) {

    if (str == null || dset == null || sset == null) {
        return null;
    }
    // NOTE(review): Charset.forName throws UnsupportedCharsetException /
    // IllegalCharsetNameException for bad names — arbitrary user input will
    // raise an exception here rather than return null. Confirm this is intended.
    Charset in_dset = Charset.forName(dset.toString());
    Charset in_sset = Charset.forName(sset.toString());

    // identical charsets: nothing to convert
    if (in_dset.equals(in_sset)) {
        result.set(str.toString());
        return result;
    }

    if (ori_1.equals(in_dset) && ori_2.equals(in_sset)) {
        try {
            // NOTE(review): encoding to GBK bytes and immediately decoding them
            // back as GBK is normally a round-trip no-op — this looks like a
            // latent bug rather than a real conversion. Confirm intent.
            result.set(new String(str.toString().getBytes("gbk"), "gbk"));
            return result;
        } catch (Exception e) {
            return null;
        }
    } else if (ori_1.equals(in_sset) && ori_2.equals(in_dset)) {
        try {
            result.set(str.toString().getBytes("utf-8"));
            return result;
        } catch (Exception e) {
            return null;
        }
    } else {
        // unsupported charset pair
        return null;
    }

}

From source file: org.eclipse.californium.proxy.HttpTranslator.java

/**
 * Method to map the http entity of a http message in a coherent payload for
 * the coap message. The method simply gets the bytes from the entity and,
 * if needed changes the charset of the obtained bytes to UTF-8.
 * /*from  www. j a v  a 2s . c  o m*/
 * @param httpEntity
 *            the http entity
 * 
 * @return byte[]
 * @throws TranslationException
 *             the translation exception
 */
public static byte[] getCoapPayload(HttpEntity httpEntity) throws TranslationException {
    if (httpEntity == null) {
        throw new IllegalArgumentException("httpEntity == null");
    }

    byte[] payload = null;
    try {
        // get the bytes from the entity
        payload = EntityUtils.toByteArray(httpEntity);
        if (payload != null && payload.length > 0 && looksLikeUTF8(payload)) {

            //modifica il payload per sostituire i riferimenti a http://proxyIP:8080/proxy/

            String body = "";
            try {
                body = new String(payload, "UTF-8");
            } catch (UnsupportedEncodingException e1) {
                // TODO Auto-generated catch block
                e1.printStackTrace();
            }

            body = body.replace("http://" + proxyIP + ":8080/proxy/", "coap://");

            payload = body.getBytes();

            // the only supported charset in CoAP is UTF-8
            Charset coapCharset = UTF_8;

            // get the charset for the http entity
            ContentType httpContentType = ContentType.getOrDefault(httpEntity);
            Charset httpCharset = httpContentType.getCharset();

            // check if the charset is the one allowed by coap
            if (httpCharset != null && !httpCharset.equals(coapCharset)) {
                // translate the payload to the utf-8 charset
                payload = changeCharset(payload, httpCharset, coapCharset);
            }
        } else {
            int i = 0;
        }

    } catch (IOException e) {
        LOGGER.warning("Cannot get the content of the http entity: " + e.getMessage());
        throw new TranslationException("Cannot get the content of the http entity", e);
    } finally {
        try {
            // ensure all content has been consumed, so that the
            // underlying connection could be re-used
            EntityUtils.consume(httpEntity);
        } catch (IOException e) {

        }
    }

    return payload;
}

From source file: org.apache.stanbol.workflow.jersey.writers.ContentItemWriter.java

/**
 * Serializes a {@link ContentItem} to the response stream. Three response shapes
 * are produced, chosen from the requested media type and the request properties:
 * (1) only the RDF metadata, (2) a single content part (blob), or (3) a multipart
 * MIME message carrying metadata, blobs and additional RDF content parts.
 *
 * <p>NOTE(review): the semantics of helpers such as {@code isOmitMetadata},
 * {@code getBlob} and {@code filterBlobs} are defined elsewhere in this class
 * and are described here only as far as this method's code shows.</p>
 */
@Override
public void writeTo(ContentItem ci, Class<?> type, Type genericType, Annotation[] annotations,
        MediaType mediaType, MultivaluedMap<String, Object> httpHeaders, OutputStream entityStream)
        throws IOException, WebApplicationException {

    //(0) handle default dataType
    Map<String, Object> reqProp = ContentItemHelper.getRequestPropertiesContentPart(ci);
    boolean omitMetadata = isOmitMetadata(reqProp);
    if (!MULTIPART.isCompatible(mediaType)) { //two possible cases
        if (!omitMetadata) { //  (1) just return the RDF data
            //(1.a) Backward support for default dataType if no Accept header is set
            StringBuilder ctb = new StringBuilder();
            if (mediaType.isWildcardType() || TEXT_PLAIN_TYPE.isCompatible(mediaType)
                    || APPLICATION_OCTET_STREAM_TYPE.isCompatible(mediaType)) {
                ctb.append(APPLICATION_LD_JSON);
            } else {
                ctb.append(mediaType.getType()).append('/').append(mediaType.getSubtype());
            }
            ctb.append(";charset=").append(UTF8.name());
            String contentType = ctb.toString();
            httpHeaders.putSingle(HttpHeaders.CONTENT_TYPE, contentType);
            try {
                serializer.serialize(entityStream, ci.getMetadata(), contentType);
            } catch (UnsupportedSerializationFormatException e) {
                throw new WebApplicationException("The enhancement results "
                        + "cannot be serialized in the requested media type: " + mediaType.toString(),
                        Response.Status.NOT_ACCEPTABLE);
            }
        } else { //  (2) return a single content part
            Entry<UriRef, Blob> contentPart = getBlob(ci, Collections.singleton(mediaType.toString()));
            if (contentPart == null) { //no alternate content with the requested media type
                throw new WebApplicationException("The requested enhancement chain has not created an "
                        + "version of the parsed content in the reuqest media type " + mediaType.toString(),
                        Response.Status.UNSUPPORTED_MEDIA_TYPE);
            } else { //found -> stream the content to the client
                //NOTE: This assumes that the presence of a charset
                //      implies reading/writing character streams
                String requestedCharset = mediaType.getParameters().get("charset");
                String blobCharset = contentPart.getValue().getParameter().get("charset");
                Charset readerCharset = blobCharset == null ? UTF8 : Charset.forName(blobCharset);
                Charset writerCharset = requestedCharset == null ? null : Charset.forName(requestedCharset);
                if (writerCharset != null && !writerCharset.equals(readerCharset)) {
                    //we need to transcode
                    Reader reader = new InputStreamReader(contentPart.getValue().getStream(), readerCharset);
                    Writer writer = new OutputStreamWriter(entityStream, writerCharset);
                    IOUtils.copy(reader, writer);
                    IOUtils.closeQuietly(reader);
                } else { //no transcoding
                    if (requestedCharset == null && blobCharset != null) {
                        httpHeaders.putSingle(HttpHeaders.CONTENT_TYPE,
                                mediaType.toString() + "; charset=" + blobCharset);
                    }
                    InputStream in = contentPart.getValue().getStream();
                    IOUtils.copy(in, entityStream);
                    IOUtils.closeQuietly(in);
                }
            }
        }
    } else { // multipart mime requested!
        final String charsetName = mediaType.getParameters().get("charset");
        final Charset charset = charsetName != null ? Charset.forName(charsetName) : UTF8;
        MediaType rdfFormat;
        String rdfFormatString = getRdfFormat(reqProp);
        if (rdfFormatString == null || rdfFormatString.isEmpty()) {
            rdfFormat = DEFAULT_RDF_FORMAT;
        } else {
            try {
                rdfFormat = MediaType.valueOf(rdfFormatString);
                if (rdfFormat.getParameters().get("charset") == null) {
                    //use the charset of the default RDF format
                    rdfFormat = new MediaType(rdfFormat.getType(), rdfFormat.getSubtype(),
                            DEFAULT_RDF_FORMAT.getParameters());
                }
            } catch (IllegalArgumentException e) {
                throw new WebApplicationException(
                        "The specified RDF format '" + rdfFormatString
                                + "' (used to serialize all RDF parts of "
                                + "multipart MIME responses) is not a well formated MIME type",
                        Response.Status.BAD_REQUEST);
            }
        }
        //(1) setting the correct header
        String contentType = String.format("%s/%s; charset=%s; boundary=%s", mediaType.getType(),
                mediaType.getSubtype(), charset.toString(), CONTENT_ITEM_BOUNDARY);
        httpHeaders.putSingle(HttpHeaders.CONTENT_TYPE, contentType);
        MultipartEntityBuilder entityBuilder = MultipartEntityBuilder.create();
        entityBuilder.setBoundary(CONTENT_ITEM_BOUNDARY);
        //(2) serialising the metadata
        if (!isOmitMetadata(reqProp)) {
            entityBuilder.addPart("metadata",
                    new ClerezzaContentBody(ci.getUri().getUnicodeString(), ci.getMetadata(), rdfFormat));
        }
        //(3) serialising the Content (Bloby)
        //(3.a) Filter based on parameter
        List<Entry<UriRef, Blob>> includedBlobs = filterBlobs(ci, reqProp);
        //(3.b) Serialise the filtered
        if (!includedBlobs.isEmpty()) {
            Map<String, ContentBody> contentParts = new LinkedHashMap<String, ContentBody>();
            for (Entry<UriRef, Blob> entry : includedBlobs) {
                Blob blob = entry.getValue();
                ContentType ct = ContentType.create(blob.getMimeType());
                String cs = blob.getParameter().get("charset");
                if (StringUtils.isNotBlank(cs)) {
                    ct = ct.withCharset(cs);
                }
                contentParts.put(entry.getKey().getUnicodeString(), new InputStreamBody(blob.getStream(), ct));
            }
            //add all the blobs
            entityBuilder.addPart("content",
                    new MultipartContentBody(contentParts, CONTENT_PARTS_BOUNDERY, MULTIPART_ALTERNATE));
        } //else no content to include
        Set<String> includeContentParts = getIncludedContentPartURIs(reqProp);
        if (includeContentParts != null) {
            //(4) serialise the Request Properties
            if (includeContentParts.isEmpty()
                    || includeContentParts.contains(REQUEST_PROPERTIES_URI.getUnicodeString())) {
                JSONObject object;
                try {
                    object = toJson(reqProp);
                } catch (JSONException e) {
                    String message = "Unable to convert Request Properties " + "to JSON (values : " + reqProp
                            + ")!";
                    log.error(message, e);
                    throw new WebApplicationException(message, Response.Status.INTERNAL_SERVER_ERROR);
                }
                entityBuilder.addTextBody(REQUEST_PROPERTIES_URI.getUnicodeString(), object.toString(),
                        ContentType.APPLICATION_JSON.withCharset(UTF8));
            }
            //(5) additional RDF metadata stored in contentParts
            for (Entry<UriRef, TripleCollection> entry : getContentParts(ci, TripleCollection.class)
                    .entrySet()) {
                // NOTE(review): entry.getKey() is a UriRef but includeContentParts is a
                // Set<String>, so this contains() check can never be true — it likely
                // should be includeContentParts.contains(entry.getKey().getUnicodeString()).
                // Confirm and fix; as written, only the isEmpty() case includes these parts.
                if (includeContentParts.isEmpty() || includeContentParts.contains(entry.getKey())) {
                    entityBuilder.addPart(entry.getKey().getUnicodeString(), new ClerezzaContentBody(null, //no file name
                            entry.getValue(), rdfFormat));
                } // else ignore this content part
            }
        }
        entityBuilder.build().writeTo(entityStream);
    }

}