Example usage for java.nio.charset StandardCharsets UTF_16

List of usage examples for java.nio.charset StandardCharsets UTF_16

Introduction

On this page you can find example usage of java.nio.charset.StandardCharsets.UTF_16.

Prototype

public static final Charset UTF_16

Document

Sixteen-bit UCS Transformation Format, byte order identified by an optional byte-order mark.
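In the JDK this charset encodes using big-endian byte order and writes a byte-order mark, while decoding honors a byte-order mark if one is present and otherwise assumes big-endian. A minimal sketch of that behavior, not taken from any of the projects below:

public static void main(String[] args) {
    // Encoding prepends the big-endian BOM 0xFE 0xFF, so the single
    // character "A" becomes four bytes: FE FF 00 41.
    byte[] bytes = "A".getBytes(StandardCharsets.UTF_16);
    for (byte b : bytes) {
        System.out.printf("%02X ", b & 0xFF);
    }
    System.out.println();

    // Decoding detects and strips the BOM, so the round trip restores "A".
    System.out.println(new String(bytes, StandardCharsets.UTF_16));
}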

Usage

From source file:Main.java

public static void main(String[] args) throws Exception {
    System.out.println(StandardCharsets.UTF_16.name());
}
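Running this prints UTF-16, the canonical name of the charset.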

From source file:de.saly.json.jsr353.benchmark.data.CreateJsonTestFiles.java

public static void create(String path) throws Exception {

    if (path == null || path.length() == 0) {
        path = "./generated/";
    }

    final File dir = new File(path).getAbsoluteFile();
    dir.mkdirs();

    System.out.println("Generating benchmark data " + dir.getAbsolutePath());

    create(path, 1, StandardCharsets.UTF_8);
    create(path, 1, StandardCharsets.UTF_16);

    create(path, 10, StandardCharsets.UTF_8);
    create(path, 10, StandardCharsets.UTF_16);

    create(path, 100, StandardCharsets.UTF_8);
    create(path, 100, StandardCharsets.UTF_16);

    create(path, 1000, StandardCharsets.UTF_8);
    create(path, 1000, StandardCharsets.UTF_16);

    create(path, 10000, StandardCharsets.UTF_8);
    create(path, 10000, StandardCharsets.UTF_16);

    create(path, 100000, StandardCharsets.UTF_8);
    create(path, 100000, StandardCharsets.UTF_16);

    create(path, 10000000, StandardCharsets.UTF_8); // 10gb
    createBigStack(path, 10000000 * 10, StandardCharsets.UTF_8);

    System.out.println("Finished.");
    System.out.println();
}

From source file:acmi.l2.clientmod.l2_version_switcher.Util.java

public static List<FileInfo> getFileInfo(InputStream is) {
    return new BufferedReader(new InputStreamReader(is, StandardCharsets.UTF_16)).lines().map(FileInfo::parse)
            .collect(Collectors.toList());
}

From source file:com.microsoft.azure.management.datalake.store.uploader.StringExtensions.java

/**
 * Finds the index in the given buffer of a newline character, either the first or the last (based on the parameters).
 * If a combined newline (\r\n), the index returned is that of the last character in the sequence.
 *
 * @param buffer The buffer to search in.
 * @param startOffset The index of the first byte to start searching at.
 * @param length The number of bytes to search, starting from the given startOffset.
 * @param reverse If true, searches from the startOffset down to the beginning of the buffer. If false, searches upwards.
 * @param encoding Indicates the type of encoding to use for the buffered bytes.
 * @param delimiter Optionally indicates the delimiter to consider as the "new line", which MUST BE a single character. If null, the default is '\\r', '\\n' and '\\r\\n'.
 * @return The index of the closest newline character in the sequence (based on direction) that was found. Returns -1 if not found.
 */
public static int findNewline(byte[] buffer, int startOffset, int length, boolean reverse, Charset encoding,
        String delimiter) {
    if (buffer.length == 0 || length == 0) {
        return -1;
    }

    // define the bytes per character to use
    int bytesPerChar;
    if (encoding.equals(StandardCharsets.UTF_16) || encoding.equals(StandardCharsets.UTF_16BE)
            || encoding.equals(StandardCharsets.UTF_16LE)) {
        bytesPerChar = 2;
    } else if (encoding.equals(StandardCharsets.US_ASCII) || encoding.equals(StandardCharsets.UTF_8)) {
        bytesPerChar = 1;
    } else {
        throw new IllegalArgumentException(
                "Only the following encodings are allowed: UTF-8, UTF-16, UTF-16BE, UTF16-LE and ASCII");
    }

    if (delimiter != null && !StringUtils.isEmpty(delimiter) && delimiter.length() > 1) {
        throw new IllegalArgumentException(
                "The delimiter must only be a single character or unspecified to represent the CRLF delimiter");
    }

    if (delimiter != null && !StringUtils.isEmpty(delimiter)) {
        // convert the byte array back to a String
        int startOfSegment = reverse ? startOffset - length + 1 : startOffset;
        String bytesToString = new String(buffer, startOfSegment, length, encoding);
        if (!bytesToString.contains(delimiter)) {
            // didn't find the delimiter.
            return -1;
        }

        // the index is returned, which is 0 based, so our loop must include the zero case.
        int numCharsToDelim = reverse ? bytesToString.lastIndexOf(delimiter) : bytesToString.indexOf(delimiter);
        int toReturn = 0;
        for (int i = 0; i <= numCharsToDelim; i++) {
            toReturn += Character.toString(bytesToString.charAt(startOfSegment + i)).getBytes(encoding).length;
        }

        // we get the total number of bytes, but we want to return the index (which starts at 0)
        // so we subtract 1 from the total number of bytes to get the final byte index.
        return toReturn - 1;
    }

    //endOffset is a 'sentinel' value; we use that to figure out when to stop searching
    int endOffset = reverse ? startOffset - length : startOffset + length;

    // if we are starting at the end, we need to move toward the front enough to grab the right number of bytes
    startOffset = reverse ? startOffset - (bytesPerChar - 1) : startOffset;

    if (startOffset < 0 || startOffset >= buffer.length) {
        throw new IndexOutOfBoundsException(
                "Given start offset is outside the bounds of the given buffer. In reverse cases, the start offset is modified to ensure we check the full size of the last character");
    }

    // make sure that the length we are traversing is at least as long as a single character
    if (length < bytesPerChar) {
        throw new IllegalArgumentException(
                "length must be at least as long as the length, in bytes, of a single character");
    }

    if (endOffset < -1 || endOffset > buffer.length) {
        throw new IndexOutOfBoundsException(
                "Given combination of startOffset and length would execute the search outside the bounds of the given buffer.");
    }

    int bufferEndOffset = reverse ? startOffset : startOffset + length;
    int result = -1;
    for (int charPos = startOffset; reverse ? charPos != endOffset
            : charPos + bytesPerChar - 1 < endOffset; charPos = reverse ? charPos - 1 : charPos + 1) {
        char c;
        if (bytesPerChar == 1) {
            c = (char) buffer[charPos];
        } else {
            String temp = new String(buffer, charPos, bytesPerChar, encoding);
            if (StringUtils.isEmpty(temp)) {
                continue;
            } else {
                c = temp.toCharArray()[0];
            }
        }

        if (isNewline(c, delimiter)) {
            result = charPos + bytesPerChar - 1;
            break;
        }
    }

    if ((delimiter == null || StringUtils.isEmpty(delimiter)) && !reverse
            && result < bufferEndOffset - bytesPerChar) {
        char c;
        if (bytesPerChar == 1) {
            c = (char) buffer[result + bytesPerChar];
        } else {
            String temp = new String(buffer, result + 1, bytesPerChar, encoding);
            if (StringUtils.isEmpty(temp)) {
                // this can occur if the number of bytes for characters in the string result in an empty string (an invalid code for the given encoding)
                // in this case, that means that we are done for the default delimiter.
                return result;
            } else {
                c = temp.toCharArray()[0];
            }
        }

        if (isNewline(c, delimiter)) {
            //we originally landed on a \r character; if we have a \r\n character, advance one position to include that
            result += bytesPerChar;
        }
    }

    return result;
}
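A hypothetical call sketch (the buffer and variable names below are illustrative only and do not appear in the original project): searching backwards for the last newline so that a UTF-16 encoded chunk can be split on a record boundary.

byte[] chunk = "first record\nsecond record\n".getBytes(StandardCharsets.UTF_16);
// Search the whole buffer from the end toward the front, using the default
// delimiters ('\r', '\n', "\r\n"); with UTF-16, two bytes are decoded per character.
int lastNewline = StringExtensions.findNewline(chunk, chunk.length - 1, chunk.length, true,
        StandardCharsets.UTF_16, null);
// lastNewline is the byte index of the final byte of the newline sequence,
// or -1 if no newline was found in the searched range.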

From source file:org.apache.logging.log4j.core.layout.CsvLogEventLayoutTest.java

@Test
public void testCustomCharset() {
    final AbstractCsvLayout layout = CsvLogEventLayout.createLayout(null, "Excel", null, null, null, null, null,
            null, StandardCharsets.UTF_16, null, null);
    assertEquals("text/csv; charset=UTF-16", layout.getContentType());
}

From source file:io.restassured.module.mockmvc.ContentTypeTest.java

@Test
public void adds_specific_charset_to_content_type_by_default() {
    final AtomicReference<String> contentType = new AtomicReference<String>();

    RestAssuredMockMvc.given().standaloneSetup(new GreetingController())
            .config(RestAssuredMockMvc.config()
                    .encoderConfig(EncoderConfig.encoderConfig().defaultCharsetForContentType(
                            StandardCharsets.UTF_16.toString(), ContentType.JSON)))
            .contentType(ContentType.JSON).interceptor(new MockHttpServletRequestBuilderInterceptor() {
                public void intercept(MockHttpServletRequestBuilder requestBuilder) {
                    MultiValueMap<String, Object> headers = Whitebox.getInternalState(requestBuilder,
                            "headers");
                    contentType.set(String.valueOf(headers.getFirst("Content-Type")));
                }
            }).when().get("/greeting?name={name}", "Johan").then().statusCode(200);

    assertThat(contentType.get()).isEqualTo("application/json;charset=" + StandardCharsets.UTF_16.toString());
    assertThat(contentType.get())
            .doesNotContain(RestAssuredMockMvc.config().getEncoderConfig().defaultContentCharset());
}

From source file:org.apache.logging.log4j.core.layout.CsvParameterLayoutTest.java

@Test
public void testCustomCharset() {
    final AbstractCsvLayout layout = CsvParameterLayout.createLayout(null, "Excel", null, null, null, null,
            null, null, StandardCharsets.UTF_16, null, null);
    assertEquals("text/csv; charset=UTF-16", layout.getContentType());
}

From source file:org.apache.nifi.security.util.crypto.HashService.java

/**
 * Returns an array of {@link AllowableValue} elements for each {@link Charset}. Only the charsets in {@link StandardCharsets} are returned to be consistent across JVM instances.
 *
 * @return an ordered {@code AllowableValue[]} containing the values
 */
public static AllowableValue[] buildCharacterSetAllowableValues() {
    final List<Charset> charsets = getSupportedCharsets();
    return charsets.stream()
            .map(cs -> new AllowableValue(cs.name(), cs.displayName(),
                    cs == StandardCharsets.UTF_16 ? UTF_16_DESCRIPTION : cs.displayName()))
            .toArray(AllowableValue[]::new);
}

From source file:org.sonar.scanner.scan.filesystem.ByteCharsetDetectorTest.java

@Test
public void tryUTF16heuristics() {
    when(validation.isUTF8(any(byte[].class), anyBoolean())).thenReturn(Result.INVALID);
    when(validation.isUTF16(any(byte[].class), anyBoolean()))
            .thenReturn(Result.newValid(StandardCharsets.UTF_16));
    when(validation.isValidUTF16(any(byte[].class), anyBoolean())).thenReturn(true);

    assertThat(charsets.detect(new byte[1])).isEqualTo(StandardCharsets.UTF_16);
}

From source file:org.apache.nifi.security.util.crypto.HashService.java

/**
 * Returns a {@link List} of supported {@link Charset}s on this platform. This is not a complete
 * list, as only the charsets in {@link StandardCharsets} are returned to be consistent across
 * JVM instances.
 *
 * @return the list of charsets
 */
public static List<Charset> getSupportedCharsets() {
    return Arrays.asList(StandardCharsets.US_ASCII, StandardCharsets.ISO_8859_1, StandardCharsets.UTF_8,
            StandardCharsets.UTF_16BE, StandardCharsets.UTF_16LE, StandardCharsets.UTF_16);
}