Example usage for java.nio ByteBuffer getClass

List of usage examples for java.nio ByteBuffer getClass

Introduction

In this page you can find the example usage for java.nio ByteBuffer getClass.

Prototype

@HotSpotIntrinsicCandidate
public final native Class<?> getClass();

Source Link

Document

Returns the runtime class of this object.

Usage

From source file:com.gpl_compression.lzo.LzoCompressor.java

/**
 * Reallocates a direct byte buffer by freeing the old one and allocating
 * a new one, unless the size is the same, in which case it is simply
 * cleared and returned./* www .ja v a2s.  com*/
 *
 * NOTE: this uses unsafe APIs to manually free memory - if anyone else
 * has a reference to the 'buf' parameter they will likely read random
 * data or cause a segfault by accessing it.
 */
private ByteBuffer realloc(ByteBuffer buf, int newSize) {
    if (buf != null) {
        if (buf.capacity() == newSize) {
            // Can use existing buffer
            buf.clear();
            return buf;
        }
        try {
            // Manually free the old buffer using undocumented unsafe APIs.
            // If this fails, we'll drop the reference and hope GC finds it
            // eventually.
            Object cleaner = buf.getClass().getMethod("cleaner").invoke(buf);
            cleaner.getClass().getMethod("clean").invoke(cleaner);
        } catch (Exception e) {
            // Perhaps a non-sun-derived JVM - contributions welcome
            LOG.warn("Couldn't realloc bytebuffer", e);
        }
    }
    return ByteBuffer.allocateDirect(newSize);
}

From source file:com.hadoop.compression.lzo.LzoCompressor.java

/**
 * Returns a cleared direct buffer of exactly {@code newSize} bytes: recycles
 * {@code buf} when its capacity already matches, otherwise explicitly releases
 * the old buffer's native memory and allocates a fresh one.
 *
 * NOTE: this uses unsafe APIs to manually free memory - if anyone else
 * has a reference to the 'buf' parameter they will likely read random
 * data or cause a segfault by accessing it.
 */
private ByteBuffer realloc(ByteBuffer buf, int newSize) {
    if (buf == null) {
        return ByteBuffer.allocateDirect(newSize);
    }
    if (buf.capacity() == newSize) {
        // Size already matches: just reset position/limit and reuse.
        buf.clear();
        return buf;
    }
    try {
        // Manually free the old buffer via undocumented unsafe APIs; the
        // accessor and the cleaner live on non-public classes, hence the
        // setAccessible calls. On failure we drop the reference and hope
        // GC reclaims it eventually.
        Method cleanerAccessor = buf.getClass().getMethod("cleaner");
        cleanerAccessor.setAccessible(true);
        Object theCleaner = cleanerAccessor.invoke(buf);
        Method cleanInvoker = theCleaner.getClass().getMethod("clean");
        cleanInvoker.setAccessible(true);
        cleanInvoker.invoke(theCleaner);
    } catch (Exception e) {
        // Perhaps a non-sun-derived JVM - contributions welcome
        LOG.warn("Couldn't realloc bytebuffer", e);
    }
    return ByteBuffer.allocateDirect(newSize);
}

From source file:org.apache.hadoop.hbase.io.hfile.slab.Slab.java

/**
 * Allocates one direct slab of {@code size} bytes, records it, and carves it
 * into consecutive slices of {@code sliceSize} bytes, accounting each slice's
 * estimated heap overhead in {@code heapSize}.
 *
 * NOTE(review): assumes size is a multiple of sliceSize — otherwise
 * limit(offset + sliceSize) would exceed the capacity and throw; confirm
 * with callers.
 */
private void allocateAndSlice(int size, int sliceSize) {
    // All slices share this single direct allocation as backing storage.
    ByteBuffer backing = ByteBuffer.allocateDirect(size);
    slabs.add(backing);
    int offset = 0;
    while (offset < backing.capacity()) {
        // Window the backing buffer to [offset, offset + sliceSize) and
        // slice out an independent view of that range.
        backing.limit(offset + sliceSize).position(offset);
        ByteBuffer slice = backing.slice();
        buffers.add(slice);
        heapSize += ClassSize.estimateBase(slice.getClass(), false);
        offset += sliceSize;
    }
}

From source file:org.apache.hadoop.hbase.io.hfile.TestHFileBlock.java

/**
 * Verifies HFileBlock heap-size accounting: first the fixed per-ByteBuffer
 * overhead constant (which differs between 32- and 64-bit JVMs), then, for a
 * few representative payload sizes, that heapSize() equals the sum of the
 * aligned HFileBlock, buffer, and HFileContext estimates.
 */
protected void testBlockHeapSizeInternals() {
    // The constant depends on the JVM data model.
    if (ClassSize.is32BitJVM()) {
        assertTrue(HFileBlock.BYTE_BUFFER_HEAP_SIZE == 64);
    } else {
        assertTrue(HFileBlock.BYTE_BUFFER_HEAP_SIZE == 80);
    }

    for (int dataSize : new int[] { 100, 256, 12345 }) {
        byte[] backing = new byte[HConstants.HFILEBLOCK_HEADER_SIZE + dataSize];
        ByteBuffer wrapped = ByteBuffer.wrap(backing, 0, dataSize);
        HFileContext context = new HFileContextBuilder().withIncludesMvcc(includesMemstoreTS)
                .withIncludesTags(includesTag).withHBaseCheckSum(false).withCompression(Algorithm.NONE)
                .withBytesPerCheckSum(HFile.DEFAULT_BYTES_PER_CHECKSUM).withChecksumType(ChecksumType.NULL)
                .build();
        HFileBlock hfileBlock = new HFileBlock(BlockType.DATA, dataSize, dataSize, -1, wrapped,
                HFileBlock.FILL_HEADER, -1, 0, context);
        // Expected size = aligned(buffer shallow size + header + payload)
        //               + aligned(context) + aligned(block shell).
        long bufferExpected = ClassSize
                .align(ClassSize.estimateBase(wrapped.getClass(), true) + HConstants.HFILEBLOCK_HEADER_SIZE
                        + dataSize);
        long contextExpected = ClassSize.align(ClassSize.estimateBase(HFileContext.class, true));
        long blockExpected = ClassSize.align(ClassSize.estimateBase(HFileBlock.class, true));
        long expected = blockExpected + bufferExpected + contextExpected;
        String failureMessage = "Block data size: " + dataSize + ", byte buffer expected " + "size: "
                + bufferExpected + ", HFileBlock class expected " + "size: " + blockExpected + ";";
        assertEquals(failureMessage, expected, hfileBlock.heapSize());
    }
}

From source file:org.apache.hadoop.hbase.util.DirectMemoryUtils.java

/**
 * Explicitly runs the Cleaner of a DirectByteBuffer, releasing its native
 * memory immediately instead of waiting for the JVM's phantom-reference
 * queue processing. Without this, discarded DirectByteBuffers linger long
 * enough to cause OutOfMemoryError under allocation pressure.
 *
 * @param toBeDestroyed
 *          the DirectByteBuffer to "clean"; accessed via reflection, and
 *          must not be used by anyone afterwards
 */
public static void destroyDirectByteBuffer(ByteBuffer toBeDestroyed) throws IllegalArgumentException,
        IllegalAccessException, InvocationTargetException, SecurityException, NoSuchMethodException {

    // Reject non-direct buffers up front; heap buffers have no cleaner.
    if (!toBeDestroyed.isDirect()) {
        throw new IllegalArgumentException("toBeDestroyed isn't direct!");
    }

    // cleaner() lives on the non-public DirectByteBuffer class, so the
    // Method objects must be made accessible before invocation.
    Method cleanerAccessor = toBeDestroyed.getClass().getMethod("cleaner");
    cleanerAccessor.setAccessible(true);
    Object cleaner = cleanerAccessor.invoke(toBeDestroyed);
    Method cleanInvoker = cleaner.getClass().getMethod("clean");
    cleanInvoker.setAccessible(true);
    cleanInvoker.invoke(cleaner);
}

From source file:org.commoncrawl.util.shared.MMapUtils.java

/**
 * Attempts to unmap the given buffer, doing nothing when the JVM offers no
 * unmap support. On Windows the lack of an explicit unmap means mmapped
 * files cannot be modified or deleted while still mapped.
 */
final static void cleanMapping(final ByteBuffer buffer) throws IOException {
    if (!getUseUnmap()) {
        return;
    }
    try {
        AccessController.doPrivileged(new PrivilegedExceptionAction<Object>() {
            public Object run() throws Exception {
                // The cleaner accessor sits on a non-public buffer class;
                // make it accessible before invoking it reflectively.
                final Method cleanerAccessor = buffer.getClass().getMethod("cleaner");
                cleanerAccessor.setAccessible(true);
                final Object cleaner = cleanerAccessor.invoke(buffer);
                // A buffer may report no cleaner; skip silently in that case.
                if (cleaner != null) {
                    cleaner.getClass().getMethod("clean").invoke(cleaner);
                }
                return null;
            }
        });
    } catch (PrivilegedActionException e) {
        // Surface the underlying reflective failure as an IOException.
        final IOException ioe = new IOException("unable to unmap the mapped buffer");
        ioe.initCause(e.getCause());
        throw ioe;
    }
}