Example usage for org.apache.commons.lang ArrayUtils subarray

List of usage examples for org.apache.commons.lang ArrayUtils subarray

Introduction

On this page you can find example usages of org.apache.commons.lang ArrayUtils.subarray.

Prototype

public static boolean[] subarray(boolean[] array, int startIndexInclusive, int endIndexExclusive) 

Document

Produces a new boolean array containing the elements between the start and end indices.
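
subarray is overloaded for every primitive array type and for Object[]; the examples below use the byte[] and int[] overloads. A minimal sketch of its semantics (null in, null out; out-of-range indices are clamped; an empty array comes back when start >= end):

import org.apache.commons.lang.ArrayUtils;

public class SubarrayDemo {
    public static void main(String[] args) {
        byte[] data = {1, 2, 3, 4, 5};
        // Copies elements [1, 4): {2, 3, 4}
        byte[] slice = ArrayUtils.subarray(data, 1, 4);
        // An end index past the array is clamped: {4, 5}
        byte[] tail = ArrayUtils.subarray(data, 3, 99);
        // start >= end yields an empty array, not null
        byte[] empty = ArrayUtils.subarray(data, 4, 2);
        // null input yields null
        byte[] none = ArrayUtils.subarray((byte[]) null, 0, 1);
        System.out.println(java.util.Arrays.toString(slice)); // [2, 3, 4]
        System.out.println(java.util.Arrays.toString(tail));  // [4, 5]
        System.out.println(empty.length);                     // 0
        System.out.println(none);                             // null
    }
}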

Usage

From source file:org.apache.cassandra.db.clock.IncrementCounterContext.java

/**
 * Human-readable String from context.
 *
 * @param context
 *            version context.
 * @return a human-readable String of the context.
 */
public String toString(byte[] context) {
    context = sortElementsById(context);

    StringBuilder sb = new StringBuilder();
    sb.append("{");
    sb.append(FBUtilities.byteArrayToLong(context, 0));
    sb.append(", ");
    sb.append(FBUtilities.byteArrayToLong(context, TIMESTAMP_LENGTH));
    sb.append(" + [");
    for (int offset = HEADER_LENGTH; offset < context.length; offset += stepLength) {
        if (offset != HEADER_LENGTH) {
            sb.append(",");
        }
        sb.append("(");
        try {
            InetAddress address = InetAddress
                    .getByAddress(ArrayUtils.subarray(context, offset, offset + idLength));
            sb.append(address.getHostAddress());
        } catch (UnknownHostException uhe) {
            sb.append("?.?.?.?");
        }
        sb.append(", ");
        sb.append(FBUtilities.byteArrayToLong(context, offset + idLength));
        sb.append(")");
    }
    sb.append("]}");
    return sb.toString();
}
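
The same slicing trick in isolation: subarray pulls a fixed-width field, here a 4-byte IPv4 address, out of a packed record (the record layout below is made up for illustration):

import java.net.InetAddress;
import org.apache.commons.lang.ArrayUtils;

public class AddressSliceDemo {
    public static void main(String[] args) throws Exception {
        // A 4-byte IPv4 address embedded at offset 2 of a larger record.
        byte[] record = {0, 0, (byte) 192, (byte) 168, 0, 1, 0, 0};
        byte[] addr = ArrayUtils.subarray(record, 2, 6);
        System.out.println(InetAddress.getByAddress(addr).getHostAddress()); // 192.168.0.1
    }
}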

From source file:org.apache.hadoop.hive.ql.exec.vector.expressions.VectorUDFMapIndexStringCol.java

@Override
protected Object getKeyByIndex(ColumnVector cv, int index) {
    BytesColumnVector bytesCV = (BytesColumnVector) cv;
    return ArrayUtils.subarray(bytesCV.vector[index], bytesCV.start[index],
            bytesCV.start[index] + bytesCV.length[index]);
}
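
Hive's BytesColumnVector stores each row's value as a slice (start, length) into a backing byte array, so subarray is the natural way to copy one row out. A plain-array model of that pattern, with only the vector/start/length names taken from BytesColumnVector (everything else is hypothetical):

import org.apache.commons.lang.ArrayUtils;

public class SliceDemo {
    public static void main(String[] args) {
        // Two values, "foo" and "bar", packed into one shared buffer.
        byte[] buffer = "foobar".getBytes();
        byte[][] vector = {buffer, buffer};
        int[] start = {0, 3};
        int[] length = {3, 3};
        int row = 1;
        byte[] value = ArrayUtils.subarray(vector[row], start[row], start[row] + length[row]);
        System.out.println(new String(value)); // bar
    }
}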

From source file:org.apache.hadoop.hive.ql.io.parquet.TestVectorizedColumnReaderBase.java

protected void binaryRead(boolean isDictionaryEncoding) throws Exception {
    Configuration conf = new Configuration();
    conf.set(IOConstants.COLUMNS, "binary_field_some_null");
    conf.set(IOConstants.COLUMNS_TYPES, "string");
    conf.setBoolean(ColumnProjectionUtils.READ_ALL_COLUMNS, false);
    conf.set(ColumnProjectionUtils.READ_COLUMN_IDS_CONF_STR, "0");
    VectorizedParquetRecordReader reader = createParquetReader(
            "message test { required binary binary_field_some_null;}", conf);
    VectorizedRowBatch previous = reader.createValue();
    int c = 0;
    try {
        while (reader.next(NullWritable.get(), previous)) {
            BytesColumnVector vector = (BytesColumnVector) previous.cols[0];
            boolean noNull = true;
            for (int i = 0; i < vector.vector.length; i++) {
                if (c == nElements) {
                    break;
                }
                String actual;
                assertEquals("Null assert failed at " + c, isNull(c), vector.isNull[i]);
                if (!vector.isNull[i]) {
                    actual = new String(ArrayUtils.subarray(vector.vector[i], vector.start[i],
                            vector.start[i] + vector.length[i]));
                    assertEquals("failed at " + c, getStr(isDictionaryEncoding, c), actual);
                } else {
                    noNull = false;
                }
                c++;
            }
            assertEquals("No Null check failed at " + c, noNull, vector.noNulls);
            assertFalse(vector.isRepeating);
        }
        assertEquals("It doesn't exit at expected position", nElements, c);
    } finally {
        reader.close();
    }
}

From source file:org.apache.hadoop.hive.ql.io.parquet.TestVectorizedListColumnReader.java

private void assertValue(String type, ColumnVector childVector, boolean isDictionaryEncoding, int valueIndex,
        int position) {
    if ("int".equals(type)) {
        assertEquals(getIntValue(isDictionaryEncoding, valueIndex),
                ((LongColumnVector) childVector).vector[position]);
    } else if ("long".equals(type)) {
        assertEquals(getLongValue(isDictionaryEncoding, valueIndex),
                ((LongColumnVector) childVector).vector[position]);
    } else if ("double".equals(type)) {
        assertEquals(getDoubleValue(isDictionaryEncoding, valueIndex),
                ((DoubleColumnVector) childVector).vector[position], 0);
    } else if ("float".equals(type)) {
        assertEquals(getFloatValue(isDictionaryEncoding, valueIndex),
                ((DoubleColumnVector) childVector).vector[position], 0);
    } else if ("boolean".equals(type)) {
        assertEquals((getBooleanValue(valueIndex) ? 1 : 0), ((LongColumnVector) childVector).vector[position]);
    } else if ("binary".equals(type)) {
        String actual = new String(ArrayUtils.subarray(((BytesColumnVector) childVector).vector[position],
                ((BytesColumnVector) childVector).start[position],
                ((BytesColumnVector) childVector).start[position]
                        + ((BytesColumnVector) childVector).length[position]));
        assertEquals(getStr(isDictionaryEncoding, valueIndex), actual);
    } else if ("decimal".equals(type)) {
        assertEquals(getDecimal(isDictionaryEncoding, valueIndex),
                ((DecimalColumnVector) childVector).vector[position].getHiveDecimal());
    } else {
        throw new RuntimeException("Unsupported type for TestVectorizedListColumnReader!");
    }
}

From source file:org.apache.hadoop.hive.ql.io.parquet.TestVectorizedMapColumnReader.java

private void assertValue(String type, ColumnVector childVector, boolean isDictionaryEncoding, int valueIndex,
        int position) {
    if ("int".equals(type)) {
        assertEquals(getIntValue(isDictionaryEncoding, valueIndex),
                ((LongColumnVector) childVector).vector[position]);
    } else if ("long".equals(type)) {
        assertEquals(getLongValue(isDictionaryEncoding, valueIndex),
                ((LongColumnVector) childVector).vector[position]);
    } else if ("double".equals(type)) {
        assertEquals(getDoubleValue(isDictionaryEncoding, valueIndex),
                ((DoubleColumnVector) childVector).vector[position], 0);
    } else if ("float".equals(type)) {
        assertEquals(getFloatValue(isDictionaryEncoding, valueIndex),
                ((DoubleColumnVector) childVector).vector[position], 0);
    } else if ("binary".equals(type) || "multipleLevel".equals(type)) {
        String actual = new String(ArrayUtils.subarray(((BytesColumnVector) childVector).vector[position],
                ((BytesColumnVector) childVector).start[position],
                ((BytesColumnVector) childVector).start[position]
                        + ((BytesColumnVector) childVector).length[position]));
        assertEquals(getStr(isDictionaryEncoding, valueIndex), actual);
    } else if ("decimal".equals(type)) {
        assertEquals(getDecimal(isDictionaryEncoding, valueIndex),
                ((DecimalColumnVector) childVector).vector[position].getHiveDecimal());
    } else {
        throw new RuntimeException("Unsupported type for TestVectorizedMapColumnReader!");
    }
}

From source file:org.apache.kylin.cube.model.RowKeyDesc.java

private void initColumnsNeedIndex() {
    int[] tmp = new int[100];
    int x = 0;
    for (int i = 0, n = rowkeyColumns.length; i < n; i++) {
        if ("true".equalsIgnoreCase(rowkeyColumns[i].getIndex()) && rowkeyColumns[i].isUsingDictionary()) {
            tmp[x] = i;
            x++;
        }
    }

    columnsNeedIndex = ArrayUtils.subarray(tmp, 0, x);
}
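
This shows a common trim idiom: collect results into an oversized scratch array, then cut it down to the used length with subarray. A standalone sketch of the same idiom (names hypothetical):

import org.apache.commons.lang.ArrayUtils;

public class TrimDemo {
    public static void main(String[] args) {
        int[] scratch = new int[100]; // oversized scratch buffer
        int used = 0;
        // Keep only the even numbers from 0..9.
        for (int i = 0; i < 10; i++) {
            if (i % 2 == 0) {
                scratch[used++] = i;
            }
        }
        // Trim the buffer to exactly the elements written.
        int[] result = ArrayUtils.subarray(scratch, 0, used);
        System.out.println(java.util.Arrays.toString(result)); // [0, 2, 4, 6, 8]
    }
}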

From source file:org.apache.niolex.commons.codec.CipherUtil.java

/**
 * Some kinds of cipher, e.g. RSA, cannot handle input larger than a fixed block size.
 * This method processes large input for such ciphers block by block.
 *
 * @param cipher the initialized cipher to use
 * @param blockSize the maximum number of bytes the cipher can process at once
 * @param input the bytes to process
 * @return the processed bytes
 * @throws IllegalBlockSizeException
 * @throws BadPaddingException
 * @throws ShortBufferException
 */
public static byte[] process(Cipher cipher, int blockSize, byte[] input)
        throws IllegalBlockSizeException, BadPaddingException, ShortBufferException {
    if (input.length <= blockSize) {
        return cipher.doFinal(input);
    }
    final int OUTPUT_SIZE = (input.length + blockSize - 1) / blockSize * cipher.getOutputSize(blockSize);

    byte[] output = new byte[OUTPUT_SIZE];
    int outputIndex = 0;
    for (int i = 0;; i += blockSize) {
        if (i + blockSize < input.length)
            outputIndex += cipher.doFinal(input, i, blockSize, output, outputIndex);
        else {
            outputIndex += cipher.doFinal(input, i, input.length - i, output, outputIndex);
            break;
        }
    }
    if (outputIndex != OUTPUT_SIZE)
        return ArrayUtils.subarray(output, 0, outputIndex);
    return output;
}
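
A hedged usage sketch: with a 2048-bit RSA key and PKCS#1 v1.5 padding, one doFinal call can encrypt at most 256 - 11 = 245 bytes, so 245 is passed as the plaintext block size (the key setup here is illustrative, not part of CipherUtil):

import java.security.KeyPair;
import java.security.KeyPairGenerator;
import javax.crypto.Cipher;

public class ProcessDemo {
    public static void main(String[] args) throws Exception {
        KeyPairGenerator kpg = KeyPairGenerator.getInstance("RSA");
        kpg.initialize(2048);
        KeyPair kp = kpg.generateKeyPair();

        Cipher cipher = Cipher.getInstance("RSA/ECB/PKCS1Padding");
        cipher.init(Cipher.ENCRYPT_MODE, kp.getPublic());

        byte[] input = new byte[1000]; // larger than one RSA block
        byte[] encrypted = CipherUtil.process(cipher, 245, input);
        System.out.println(encrypted.length); // 5 blocks * 256 bytes = 1280
    }
}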

From source file:org.apache.niolex.commons.net.DownloadUtil.java

/**
 * Download a file whose size is not known in advance.
 *
 * @param strUrl The Url to be downloaded.
 * @param in the input stream.
 * @param maxFileSize Max file size in BYTE.
 * @param useCache Whether we use thread local cache or not.
 * @return the file content.
 * @throws IOException
 * @throws NetException
 */
public static byte[] unusualDownload(String strUrl, InputStream in, int maxFileSize, Boolean useCache)
        throws IOException, NetException {
    byte[] byteBuf = getByteBuffer(useCache == null ? useThreadLocalCache : useCache);
    byte[] ret = null;
    int count, total = 0;
    final int size = byteBuf.length;
    // Start to download file.
    while ((count = in.read(byteBuf, total, size - total)) > 0) {
        total += count;
    }
    if (count == -1) {
        // Case 1. File is ready
        ret = ArrayUtils.subarray(byteBuf, 0, total);
    } else {
        // Case 2. We still need read more data
        ByteArrayOutputStream bos = new ByteArrayOutputStream(MATERIAL_SIZE);
        count = total;
        total = 0;
        do {
            bos.write(byteBuf, 0, count);
            total += count;
            if (total > maxFileSize) {
                throw new NetException(NetException.ExCode.FILE_TOO_LARGE,
                        "File " + strUrl + " size exceed [" + maxFileSize + "] download stoped.");
            }
        } while ((count = in.read(byteBuf)) > 0);
        ret = bos.toByteArray();
    }
    validateContentLength(strUrl, ret.length, maxFileSize);
    return ret;
}

From source file:org.apache.shindig.common.util.CharsetUtil.java

/**
 * @return UTF-8 byte array for the input string.
 */
public static byte[] getUtf8Bytes(String s) {
    if (s == null) {
        return ArrayUtils.EMPTY_BYTE_ARRAY;
    }
    ByteBuffer bb = Charsets.UTF_8.encode(s);
    return ArrayUtils.subarray(bb.array(), 0, bb.limit());
}
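
The subarray call matters here because Charset.encode returns a ByteBuffer whose backing array can be larger than the encoded content; limit() marks the end of the valid bytes. A minimal illustration using the standard-library charset constants:

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

public class LimitDemo {
    public static void main(String[] args) {
        ByteBuffer bb = StandardCharsets.UTF_8.encode("hi");
        // The backing array may be over-allocated; only [0, limit) holds data.
        System.out.println(bb.limit());                       // 2
        System.out.println(bb.array().length >= bb.limit());  // true
    }
}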

From source file:org.apache.tajo.storage.v2.CSVFileScanner.java

private void page() throws IOException {
    // Index initialization
    currentIdx = 0;

    // Buffer size set
    if (isSplittable() && fragmentable() < DEFAULT_BUFFER_SIZE) {
        bufSize = (int) fragmentable();
    }

    if (this.tail == null || this.tail.length == 0) {
        this.pageStart = getFilePosition();
        this.prevTailLen = 0;
    } else {
        this.pageStart = getFilePosition() - this.tail.length;
        this.prevTailLen = this.tail.length;
    }

    // Read
    int rbyte;
    buf = new byte[bufSize];
    rbyte = is.read(buf);

    if (rbyte < 0) {
        eof = true; // EOF
        return;
    }

    if (prevTailLen == 0) {
        tail = new byte[0];
        tuples = BytesUtils.splitPreserveAllTokens(buf, rbyte, (char) LF);
    } else {
        byte[] lastRow = ArrayUtils.addAll(tail, buf);
        tuples = BytesUtils.splitPreserveAllTokens(lastRow, rbyte + tail.length, (char) LF);
        tail = null;
    }

    // Check tail
    if ((char) buf[rbyte - 1] != LF) {
        if ((fragmentable() < 1 || rbyte != bufSize)) {
            int lineFeedPos = 0;
            byte[] temp = new byte[DEFAULT_BUFFER_SIZE];

            // find line feed
            while ((temp[lineFeedPos] = (byte) is.read()) != (byte) LF) {
                if (temp[lineFeedPos] < 0) {
                    break;
                }
                lineFeedPos++;
            }

            tuples[tuples.length - 1] = ArrayUtils.addAll(tuples[tuples.length - 1],
                    ArrayUtils.subarray(temp, 0, lineFeedPos));
            validIdx = tuples.length;
        } else {
            tail = tuples[tuples.length - 1];
            validIdx = tuples.length - 1;
        }
    } else {
        tail = new byte[0];
        validIdx = tuples.length - 1;
    }

    if (!isCompress())
        makeTupleOffset();
}