Example usage for org.apache.commons.lang ArrayUtils EMPTY_BYTE_ARRAY

List of usage examples for org.apache.commons.lang ArrayUtils EMPTY_BYTE_ARRAY

Introduction

This page collects example usages of org.apache.commons.lang ArrayUtils.EMPTY_BYTE_ARRAY from open source projects.

Prototype

public static final byte[] EMPTY_BYTE_ARRAY

To view the source code for org.apache.commons.lang ArrayUtils EMPTY_BYTE_ARRAY, click the Source Link.

Document

An empty immutable byte array.
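As a minimal sketch of the typical pattern (not taken from the examples below; the class and method names are hypothetical), the constant is usually returned as a null-safe default so that no new zero-length array needs to be allocated:

import org.apache.commons.lang.ArrayUtils;

public class PayloadHolder {
    private byte[] payload;

    // Return the shared empty array rather than null or new byte[0],
    // so callers can check length or iterate without a null check.
    public byte[] getPayloadOrEmpty() {
        return payload != null ? payload : ArrayUtils.EMPTY_BYTE_ARRAY;
    }

    public void setPayload(byte[] payload) {
        this.payload = payload;
    }
}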

Usage

From source file:org.apache.cassandra.streaming.ReplicationFinishedVerbHandler.java

public void doVerb(Message msg, String id) {
    StorageService.instance.confirmReplication(msg.getFrom());
    // Reply with an empty body; only the acknowledgement itself matters here.
    Message response = msg.getInternalReply(ArrayUtils.EMPTY_BYTE_ARRAY, msg.getVersion());
    if (logger.isDebugEnabled())
        logger.debug("Replying to " + id + "@" + msg.getFrom());
    MessagingService.instance().sendReply(response, id, msg.getFrom());
}

From source file:org.apache.cassandra.streaming.StreamingTransferTest.java

@Test
public void testTransferTable() throws Exception {
    Table table = Table.open("Keyspace1");
    ColumnFamilyStore cfs = table.getColumnFamilyStore("Indexed1");

    // write a temporary SSTable, and unregister it
    for (int i = 1; i <= 3; i++) {
        String key = "key" + i;
        RowMutation rm = new RowMutation("Keyspace1", key.getBytes());
        ColumnFamily cf = ColumnFamily.create(table.name, cfs.columnFamily);
        cf.addColumn(column(key, "v", new TimestampClock(0)));
        cf.addColumn(new Column("birthdate".getBytes("UTF8"), FBUtilities.toByteArray((long) i),
                new TimestampClock(0)));
        rm.add(cf);
        rm.apply();
    }
    cfs.forceBlockingFlush();
    assert cfs.getSSTables().size() == 1;
    SSTableReader sstable = cfs.getSSTables().iterator().next();
    cfs.removeAllSSTables();

    // transfer the first and last key
    IPartitioner p = StorageService.getPartitioner();
    List<Range> ranges = new ArrayList<Range>();
    ranges.add(new Range(p.getMinimumToken(), p.getToken("key1".getBytes())));
    ranges.add(new Range(p.getToken("key2".getBytes()), p.getMinimumToken()));
    StreamOutSession session = StreamOutSession.create(table.name, LOCAL, null);
    StreamOut.transferSSTables(session, Arrays.asList(sstable), ranges);
    session.await();

    // confirm that the SSTable was transferred and registered
    List<Row> rows = Util.getRangeSlice(cfs);
    assertEquals(2, rows.size());
    assert Arrays.equals(rows.get(0).key.key, "key1".getBytes());
    assert Arrays.equals(rows.get(1).key.key, "key3".getBytes());
    assertEquals(2, rows.get(0).cf.getColumnsMap().size());
    assertEquals(2, rows.get(1).cf.getColumnsMap().size());
    assert rows.get(1).cf.getColumn("key3".getBytes()) != null;

    // and that the index and filter were properly recovered
    assert null != cfs
            .getColumnFamily(QueryFilter.getIdentityFilter(Util.dk("key1"), new QueryPath(cfs.columnFamily)));
    assert null != cfs
            .getColumnFamily(QueryFilter.getIdentityFilter(Util.dk("key3"), new QueryPath(cfs.columnFamily)));

    // and that the secondary index works
    IndexExpression expr = new IndexExpression("birthdate".getBytes("UTF8"), IndexOperator.EQ,
            FBUtilities.toByteArray(3L));
    // An empty start key scans the index from the beginning.
    IndexClause clause = new IndexClause(Arrays.asList(expr), ArrayUtils.EMPTY_BYTE_ARRAY, 100);
    IFilter filter = new IdentityQueryFilter();
    Range range = new Range(p.getMinimumToken(), p.getMinimumToken());
    rows = cfs.scan(clause, range, filter);
    assertEquals(1, rows.size());
    assert Arrays.equals(rows.get(0).key.key, "key3".getBytes());
}

From source file:org.apache.hadoop.hbase.util.TestRegionSplitter.java

/**
 * Test creating a pre-split table using the HexStringSplit algorithm.
 */
@Test
public void testCreatePresplitTableHex() throws Exception {
    final List<byte[]> expectedBounds = new ArrayList<byte[]>();
    // HBase uses an empty byte array for the first region's start key (and the last region's end key).
    expectedBounds.add(ArrayUtils.EMPTY_BYTE_ARRAY);
    expectedBounds.add("10000000".getBytes());
    expectedBounds.add("20000000".getBytes());
    expectedBounds.add("30000000".getBytes());
    expectedBounds.add("40000000".getBytes());
    expectedBounds.add("50000000".getBytes());
    expectedBounds.add("60000000".getBytes());
    expectedBounds.add("70000000".getBytes());
    expectedBounds.add("80000000".getBytes());
    expectedBounds.add("90000000".getBytes());
    expectedBounds.add("a0000000".getBytes());
    expectedBounds.add("b0000000".getBytes());
    expectedBounds.add("c0000000".getBytes());
    expectedBounds.add("d0000000".getBytes());
    expectedBounds.add("e0000000".getBytes());
    expectedBounds.add("f0000000".getBytes());
    expectedBounds.add(ArrayUtils.EMPTY_BYTE_ARRAY);

    // Do table creation/pre-splitting and verification of region boundaries
    preSplitTableAndVerify(expectedBounds, HexStringSplit.class.getSimpleName(), "NewHexPresplitTable");
}

From source file:org.apache.hadoop.hbase.util.TestRegionSplitter.java

/**
 * Test creating a pre-split table using the UniformSplit algorithm.
 */
@Test
public void testCreatePresplitTableUniform() throws Exception {
    List<byte[]> expectedBounds = new ArrayList<byte[]>();
    expectedBounds.add(ArrayUtils.EMPTY_BYTE_ARRAY);
    expectedBounds.add(new byte[] { 0x10, 0, 0, 0, 0, 0, 0, 0 });
    expectedBounds.add(new byte[] { 0x20, 0, 0, 0, 0, 0, 0, 0 });
    expectedBounds.add(new byte[] { 0x30, 0, 0, 0, 0, 0, 0, 0 });
    expectedBounds.add(new byte[] { 0x40, 0, 0, 0, 0, 0, 0, 0 });
    expectedBounds.add(new byte[] { 0x50, 0, 0, 0, 0, 0, 0, 0 });
    expectedBounds.add(new byte[] { 0x60, 0, 0, 0, 0, 0, 0, 0 });
    expectedBounds.add(new byte[] { 0x70, 0, 0, 0, 0, 0, 0, 0 });
    expectedBounds.add(new byte[] { (byte) 0x80, 0, 0, 0, 0, 0, 0, 0 });
    expectedBounds.add(new byte[] { (byte) 0x90, 0, 0, 0, 0, 0, 0, 0 });
    expectedBounds.add(new byte[] { (byte) 0xa0, 0, 0, 0, 0, 0, 0, 0 });
    expectedBounds.add(new byte[] { (byte) 0xb0, 0, 0, 0, 0, 0, 0, 0 });
    expectedBounds.add(new byte[] { (byte) 0xc0, 0, 0, 0, 0, 0, 0, 0 });
    expectedBounds.add(new byte[] { (byte) 0xd0, 0, 0, 0, 0, 0, 0, 0 });
    expectedBounds.add(new byte[] { (byte) 0xe0, 0, 0, 0, 0, 0, 0, 0 });
    expectedBounds.add(new byte[] { (byte) 0xf0, 0, 0, 0, 0, 0, 0, 0 });
    expectedBounds.add(ArrayUtils.EMPTY_BYTE_ARRAY);

    // Do table creation/pre-splitting and verification of region boundaries
    preSplitTableAndVerify(expectedBounds, UniformSplit.class.getSimpleName(), "NewUniformPresplitTable");
}

From source file:org.apache.hadoop.hbase.util.TestRegionSplitter.java

/**
 * Unit tests for the UniformSplit algorithm. Makes sure it divides up the space of
 * keys in the way that we expect.
 */
@Test
public void unitTestUniformSplit() {
    UniformSplit splitter = new UniformSplit();

    // Check splitting while starting from scratch
    try {
        splitter.split(1);
        throw new AssertionError("Splitting into <2 regions should have thrown exception");
    } catch (IllegalArgumentException e) {
        // expected: splitting into fewer than two regions is rejected
    }

    byte[][] twoRegionsSplits = splitter.split(2);
    assertEquals(1, twoRegionsSplits.length);
    assertArrayEquals(twoRegionsSplits[0], new byte[] { (byte) 0x80, 0, 0, 0, 0, 0, 0, 0 });

    byte[][] threeRegionsSplits = splitter.split(3);
    assertEquals(2, threeRegionsSplits.length);
    byte[] expectedSplit0 = new byte[] { 0x55, 0x55, 0x55, 0x55, 0x55, 0x55, 0x55, 0x55 };
    assertArrayEquals(expectedSplit0, threeRegionsSplits[0]);
    byte[] expectedSplit1 = new byte[] { (byte) 0xAA, (byte) 0xAA, (byte) 0xAA, (byte) 0xAA, (byte) 0xAA,
            (byte) 0xAA, (byte) 0xAA, (byte) 0xAA };
    assertArrayEquals(expectedSplit1, threeRegionsSplits[1]);

    // Check splitting existing regions that have start and end points
    byte[] splitPoint = splitter.split(new byte[] { 0x10 }, new byte[] { 0x30 });
    assertArrayEquals(new byte[] { 0x20 }, splitPoint);

    byte[] lastRow = new byte[] { (byte) 0xFF, (byte) 0xFF, (byte) 0xFF, (byte) 0xFF, (byte) 0xFF, (byte) 0xFF,
            (byte) 0xFF, (byte) 0xFF };
    assertArrayEquals(lastRow, splitter.lastRow());
    byte[] firstRow = ArrayUtils.EMPTY_BYTE_ARRAY;
    assertArrayEquals(firstRow, splitter.firstRow());

    splitPoint = splitter.split(firstRow, new byte[] { 0x20 });
    assertArrayEquals(splitPoint, new byte[] { 0x10 });

    splitPoint = splitter.split(new byte[] { (byte) 0xdf, (byte) 0xFF, (byte) 0xFF, (byte) 0xFF, (byte) 0xFF,
            (byte) 0xFF, (byte) 0xFF, (byte) 0xFF }, lastRow);
    assertArrayEquals(splitPoint, new byte[] { (byte) 0xef, (byte) 0xFF, (byte) 0xFF, (byte) 0xFF, (byte) 0xFF,
            (byte) 0xFF, (byte) 0xFF, (byte) 0xFF });
}

From source file:org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpressionWriterFactory.java

private static VectorExpressionWriter genVectorExpressionWritableBinary(
        SettableBinaryObjectInspector fieldObjInspector) throws HiveException {
    return new VectorExpressionWriterBytes() {
        private Object obj;
        private byte[] bytes;

        public VectorExpressionWriter init(SettableBinaryObjectInspector objInspector) throws HiveException {
            super.init(objInspector);
            // Start from the shared empty array; writeValue/setValue replace it.
            this.bytes = ArrayUtils.EMPTY_BYTE_ARRAY;
            this.obj = initValue(null);
            return this;
        }

        @Override
        public Object writeValue(byte[] value, int start, int length) throws HiveException {
            bytes = Arrays.copyOfRange(value, start, start + length);
            ((SettableBinaryObjectInspector) this.objectInspector).set(this.obj, bytes);
            return this.obj;
        }

        @Override
        public Object setValue(Object field, byte[] value, int start, int length) throws HiveException {
            if (null == field) {
                field = initValue(null);
            }
            bytes = Arrays.copyOfRange(value, start, start + length);
            ((SettableBinaryObjectInspector) this.objectInspector).set(field, bytes);
            return field;
        }

        @Override
        public Object initValue(Object ignored) {
            return ((SettableBinaryObjectInspector) this.objectInspector).create(ArrayUtils.EMPTY_BYTE_ARRAY);
        }
    }.init(fieldObjInspector);
}

From source file:org.apache.hadoop.hive.ql.exec.vector.RandomRowObjectSource.java

public Object getWritableObject(int column, Object object) {
    ObjectInspector objectInspector = primitiveObjectInspectorList.get(column);
    PrimitiveCategory primitiveCategory = primitiveCategories[column];
    PrimitiveTypeInfo primitiveTypeInfo = primitiveTypeInfos[column];
    switch (primitiveCategory) {
    case BOOLEAN:
        return ((WritableBooleanObjectInspector) objectInspector).create((boolean) object);
    case BYTE:
        return ((WritableByteObjectInspector) objectInspector).create((byte) object);
    case SHORT:
        return ((WritableShortObjectInspector) objectInspector).create((short) object);
    case INT:
        return ((WritableIntObjectInspector) objectInspector).create((int) object);
    case LONG:
        return ((WritableLongObjectInspector) objectInspector).create((long) object);
    case DATE:
        return ((WritableDateObjectInspector) objectInspector).create((Date) object);
    case FLOAT:
        return ((WritableFloatObjectInspector) objectInspector).create((float) object);
    case DOUBLE:
        return ((WritableDoubleObjectInspector) objectInspector).create((double) object);
    case STRING:
        return ((WritableStringObjectInspector) objectInspector).create((String) object);
    case CHAR: {
        WritableHiveCharObjectInspector writableCharObjectInspector = new WritableHiveCharObjectInspector(
                (CharTypeInfo) primitiveTypeInfo);
        return writableCharObjectInspector.create(new HiveChar(StringUtils.EMPTY, -1));
    }
    case VARCHAR: {
        WritableHiveVarcharObjectInspector writableVarcharObjectInspector = new WritableHiveVarcharObjectInspector(
                (VarcharTypeInfo) primitiveTypeInfo);
        return writableVarcharObjectInspector.create(new HiveVarchar(StringUtils.EMPTY, -1));
    }
    case BINARY:
        return PrimitiveObjectInspectorFactory.writableBinaryObjectInspector
                .create(ArrayUtils.EMPTY_BYTE_ARRAY);
    case TIMESTAMP:
        return ((WritableTimestampObjectInspector) objectInspector).create(new Timestamp(0));
    case INTERVAL_YEAR_MONTH:
        return ((WritableHiveIntervalYearMonthObjectInspector) objectInspector)
                .create(new HiveIntervalYearMonth(0));
    case INTERVAL_DAY_TIME:
        return ((WritableHiveIntervalDayTimeObjectInspector) objectInspector)
                .create(new HiveIntervalDayTime(0, 0));
    case DECIMAL: {
        WritableHiveDecimalObjectInspector writableDecimalObjectInspector = new WritableHiveDecimalObjectInspector(
                (DecimalTypeInfo) primitiveTypeInfo);
        return writableDecimalObjectInspector.create(HiveDecimal.ZERO);
    }
    default:
        throw new Error("Unknown primitive category " + primitiveCategory);
    }
}

From source file:org.apache.hadoop.hive.ql.exec.vector.VectorizedBatchUtil.java

public static Writable getPrimitiveWritable(PrimitiveCategory primitiveCategory) {
    switch (primitiveCategory) {
    case VOID:
        return null;
    case BOOLEAN:
        return new BooleanWritable(false);
    case BYTE:
        return new ByteWritable((byte) 0);
    case SHORT:
        return new ShortWritable((short) 0);
    case INT:
        return new IntWritable(0);
    case LONG:
        return new LongWritable(0);
    case TIMESTAMP:
        return new TimestampWritable(new Timestamp(0));
    case DATE:
        return new DateWritable(new Date(0));
    case FLOAT:
        return new FloatWritable(0);
    case DOUBLE:
        return new DoubleWritable(0);
    case BINARY:
        return new BytesWritable(ArrayUtils.EMPTY_BYTE_ARRAY);
    case STRING:
        return new Text(ArrayUtils.EMPTY_BYTE_ARRAY);
    case VARCHAR:
        return new HiveVarcharWritable(new HiveVarchar(StringUtils.EMPTY, -1));
    case CHAR:
        return new HiveCharWritable(new HiveChar(StringUtils.EMPTY, -1));
    case DECIMAL:
        return new HiveDecimalWritable();
    case INTERVAL_YEAR_MONTH:
        return new HiveIntervalYearMonthWritable();
    case INTERVAL_DAY_TIME:
        return new HiveIntervalDayTimeWritable();
    default:
        throw new RuntimeException("Primitive category " + primitiveCategory.name() + " not supported");
    }
}

From source file:org.apache.hadoop.hive.ql.exec.vector.VectorRandomRowSource.java

public static Object getWritableObject(int column, Object object,
        List<ObjectInspector> primitiveObjectInspectorList, PrimitiveCategory[] primitiveCategories,
        PrimitiveTypeInfo[] primitiveTypeInfos) {
    ObjectInspector objectInspector = primitiveObjectInspectorList.get(column);
    PrimitiveCategory primitiveCategory = primitiveCategories[column];
    PrimitiveTypeInfo primitiveTypeInfo = primitiveTypeInfos[column];
    switch (primitiveCategory) {
    case BOOLEAN:
        return ((WritableBooleanObjectInspector) objectInspector).create((boolean) object);
    case BYTE:
        return ((WritableByteObjectInspector) objectInspector).create((byte) object);
    case SHORT:
        return ((WritableShortObjectInspector) objectInspector).create((short) object);
    case INT:
        return ((WritableIntObjectInspector) objectInspector).create((int) object);
    case LONG:
        return ((WritableLongObjectInspector) objectInspector).create((long) object);
    case DATE:
        return ((WritableDateObjectInspector) objectInspector).create((Date) object);
    case FLOAT:
        return ((WritableFloatObjectInspector) objectInspector).create((float) object);
    case DOUBLE:
        return ((WritableDoubleObjectInspector) objectInspector).create((double) object);
    case STRING:
        return ((WritableStringObjectInspector) objectInspector).create((String) object);
    case CHAR: {
        WritableHiveCharObjectInspector writableCharObjectInspector = new WritableHiveCharObjectInspector(
                (CharTypeInfo) primitiveTypeInfo);
        return writableCharObjectInspector.create(new HiveChar(StringUtils.EMPTY, -1));
    }
    case VARCHAR: {
        WritableHiveVarcharObjectInspector writableVarcharObjectInspector = new WritableHiveVarcharObjectInspector(
                (VarcharTypeInfo) primitiveTypeInfo);
        return writableVarcharObjectInspector.create(new HiveVarchar(StringUtils.EMPTY, -1));
    }
    case BINARY:
        return PrimitiveObjectInspectorFactory.writableBinaryObjectInspector
                .create(ArrayUtils.EMPTY_BYTE_ARRAY);
    case TIMESTAMP:
        return ((WritableTimestampObjectInspector) objectInspector).create(new Timestamp(0));
    case INTERVAL_YEAR_MONTH:
        return ((WritableHiveIntervalYearMonthObjectInspector) objectInspector)
                .create(new HiveIntervalYearMonth(0));
    case INTERVAL_DAY_TIME:
        return ((WritableHiveIntervalDayTimeObjectInspector) objectInspector)
                .create(new HiveIntervalDayTime(0, 0));
    case DECIMAL: {
        WritableHiveDecimalObjectInspector writableDecimalObjectInspector = new WritableHiveDecimalObjectInspector(
                (DecimalTypeInfo) primitiveTypeInfo);
        return writableDecimalObjectInspector.create(HiveDecimal.ZERO);
    }
    default:
        throw new Error("Unknown primitive category " + primitiveCategory);
    }
}

From source file:org.apache.shindig.common.util.CharsetUtil.java

/**
 * @return UTF-8 byte array for the input string.
 */
public static byte[] getUtf8Bytes(String s) {
    if (s == null) {
        return ArrayUtils.EMPTY_BYTE_ARRAY;
    }
    ByteBuffer bb = Charsets.UTF_8.encode(s);
    return ArrayUtils.subarray(bb.array(), 0, bb.limit());
}
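Note that in this last example getUtf8Bytes(null) returns the shared ArrayUtils.EMPTY_BYTE_ARRAY instead of null, so callers can always treat the result as a valid, possibly empty, byte array.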