Example usage for java.nio ByteBuffer duplicate

Introduction

On this page you can find example usages of java.nio.ByteBuffer.duplicate().

Prototype

public abstract ByteBuffer duplicate();

Document

Returns a duplicated buffer that shares its content with this buffer.
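
Because both buffers share the same content but keep independent position, limit, and mark values, a duplicate can be repositioned, flipped, or limited without disturbing the original. A minimal, self-contained sketch (the class name is illustrative):

import java.nio.ByteBuffer;

public class DuplicateDemo {
    public static void main(String[] args) {
        ByteBuffer original = ByteBuffer.allocate(8);
        original.putInt(42);                      // advances original's position to 4

        // The duplicate shares content but has its own position/limit/mark.
        ByteBuffer dup = original.duplicate();
        dup.flip();                               // rewinds only the duplicate: position 0, limit 4

        System.out.println(dup.getInt());         // prints 42, read through the shared content
        System.out.println(original.position());  // prints 4: the original is untouched
    }
}

Changes to the shared content are visible through both buffers, so duplicate() isolates only the index state, not the data.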

Usage

From source file:org.apache.jackrabbit.oak.plugins.segment.file.TarReader.java
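Here duplicate() produces an independent view of the graph buffer, so its limit can be trimmed to exclude the trailing footer without touching the caller's buffer: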

private static Map<UUID, List<UUID>> parseGraph(ByteBuffer graphByteBuffer) {
    int count = graphByteBuffer.getInt(graphByteBuffer.limit() - 12);

    ByteBuffer buffer = graphByteBuffer.duplicate();
    buffer.limit(graphByteBuffer.limit() - 16);

    List<UUID> uuids = newArrayListWithCapacity(count);
    for (int i = 0; i < count; i++) {
        uuids.add(new UUID(buffer.getLong(), buffer.getLong()));
    }

    Map<UUID, List<UUID>> graph = newHashMap();
    while (buffer.hasRemaining()) {
        UUID uuid = uuids.get(buffer.getInt());
        List<UUID> list = newArrayList();
        int refid = buffer.getInt();
        while (refid != -1) {
            list.add(uuids.get(refid));
            refid = buffer.getInt();
        }
        graph.put(uuid, list);
    }
    return graph;
}

From source file:org.apache.jackrabbit.oak.segment.file.TarReader.java
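The same parser, extended with a bulkOnly flag that skips references to data segments while reading through the duplicate: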

private static Map<UUID, List<UUID>> parseGraph(ByteBuffer graphByteBuffer, boolean bulkOnly) {
    int count = graphByteBuffer.getInt(graphByteBuffer.limit() - 12);

    ByteBuffer buffer = graphByteBuffer.duplicate();
    buffer.limit(graphByteBuffer.limit() - 16);

    List<UUID> uuids = newArrayListWithCapacity(count);
    for (int i = 0; i < count; i++) {
        uuids.add(new UUID(buffer.getLong(), buffer.getLong()));
    }

    Map<UUID, List<UUID>> graph = newHashMap();
    while (buffer.hasRemaining()) {
        UUID uuid = uuids.get(buffer.getInt());
        List<UUID> list = newArrayList();
        int refid = buffer.getInt();
        while (refid != -1) {
            UUID ref = uuids.get(refid);
            if (!bulkOnly || !isDataSegmentId(ref.getLeastSignificantBits())) {
                list.add(ref);
            }
            refid = buffer.getInt();
        }
        graph.put(uuid, list);
    }
    return graph;
}

From source file:org.apache.qpid.server.store.derby.DerbyMessageStore.java
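The message chunk is copied out with src.duplicate().get(chunkData), a bulk read that advances the duplicate's position while src itself stays at position zero: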

private void addContent(Connection conn, long messageId, ByteBuffer src) {
    if (_logger.isDebugEnabled()) {
        _logger.debug("Adding content for message " + messageId);
    }
    PreparedStatement stmt = null;

    try {
        src = src.slice();

        byte[] chunkData = new byte[src.limit()];
        src.duplicate().get(chunkData);

        stmt = conn.prepareStatement(INSERT_INTO_MESSAGE_CONTENT);
        stmt.setLong(1, messageId);

        ByteArrayInputStream bis = new ByteArrayInputStream(chunkData);
        stmt.setBinaryStream(2, bis, chunkData.length);
        stmt.executeUpdate();
    } catch (SQLException e) {
        closeConnection(conn);
        throw new RuntimeException("Error adding content for message " + messageId + ": " + e.getMessage(), e);
    } finally {
        closePreparedStatement(stmt);
    }

}

From source file:org.apache.usergrid.persistence.index.utils.ConversionUtils.java
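When the requested type is ByteBuffer, the method returns bytes.duplicate(), handing the caller an independent view that cannot disturb the source buffer's position: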

public static Object object(Class<?> type, ByteBuffer bytes) {

    try {
        if (Long.class.isAssignableFrom(type)) {
            return bytes.slice().getLong();
        } else if (UUID.class.isAssignableFrom(type)) {
            return uuid(bytes);
        } else if (String.class.isAssignableFrom(type)) {
            return string(bytes);
        } else if (Boolean.class.isAssignableFrom(type)) {
            return bytes.slice().get() != 0;
        } else if (Integer.class.isAssignableFrom(type)) {
            return bytes.slice().getInt();
        } else if (Double.class.isAssignableFrom(type)) {
            return bytes.slice().getDouble();
        } else if (Float.class.isAssignableFrom(type)) {
            return bytes.slice().getFloat();
        } else if (ByteBuffer.class.isAssignableFrom(type)) {
            return bytes.duplicate();
        } else if (byte[].class.isAssignableFrom(type)) {
            byte[] b = new byte[bytes.remaining()];
            bytes.slice().get(b);
            return b;
        }
    } catch (Exception e) {
        logger.error("Unable to get object from bytes for type {}", type.getName(), e);
    }
    return null;
}

From source file:org.apache.usergrid.persistence.map.impl.MapSerializationImpl.java
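After the composite key is assembled and flipped, the method returns stuff.duplicate(), an independent view of the finished buffer: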

public static ByteBuffer serializeKeys(UUID ownerUUID, String ownerType, String mapName, String mapKey,
        int bucketNumber) {

    List<Object> keys = new ArrayList<>(4);
    keys.add(0, ownerUUID);
    keys.add(1, ownerType);
    keys.add(2, mapName);
    keys.add(3, mapKey);

    if (bucketNumber > 0) {
        keys.add(4, bucketNumber);
    }

    // UUIDs are 16 bytes, allocate the buffer accordingly
    int size = 16 + ownerType.getBytes().length + mapName.getBytes().length + mapKey.getBytes().length;
    if (bucketNumber > 0) {
        // ints are 4 bytes
        size += 4;
    }

    // each component adds a 2-byte length prefix and a 1-byte equality byte
    size += keys.size() * 3;

    ByteBuffer stuff = ByteBuffer.allocate(size);

    for (Object key : keys) {

        ByteBuffer kb = DataType.serializeValue(key, ProtocolVersion.NEWEST_SUPPORTED);
        if (kb == null) {
            kb = ByteBuffer.allocate(0);
        }

        stuff.putShort((short) kb.remaining());
        stuff.put(kb.slice());
        stuff.put((byte) 0);

    }
    stuff.flip();
    return stuff.duplicate();

}

From source file:org.cloudata.core.commitlog.pipe.Bulk.java
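The header fields are parsed through a duplicate of the incoming buffer, so the repeated absolute repositioning never moves the read buffer's own position: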

private boolean readHeader(ByteBuffer buf) throws IOException {
    ByteBuffer headerBuf = buf.duplicate();

    if (seq < 0 && headerBuf.limit() >= readBytesLength) {
        headerBuf.position(0);
        seq = headerBuf.getInt();

        if (seq == Constants.PIPE_DISCONNECT) {
            LOG.debug("receive PIPE_DISCONNECT");
            throw new PipeClosing();
        }

        readBytesLength += 4;
    }

    if (dirNameBytes == null && headerBuf.limit() >= readBytesLength) {
        headerBuf.position(readBytesLength - 4);
        int len = headerBuf.getInt();
        if (len > 1000000) {
            throw new IOException("dirName byte length is too long [" + len + "]");
        }

        dirNameBytes = new byte[len];
        readBytesLength += len;
    }

    if (dirNameBytes != null && headerBuf.limit() >= readBytesLength) {
        headerBuf.position(readBytesLength - dirNameBytes.length);
        headerBuf.get(dirNameBytes);
        readBytesLength += 4;
    }

    if (headerAndPayloadSize == 0 && headerBuf.limit() >= readBytesLength) {
        headerBuf.position(readBytesLength - 4);
        headerAndPayloadSize = headerBuf.getInt();

        return true;
    }

    return false;
}

From source file:org.cloudata.core.commitlog.pipe.Bulk.java
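Each pending buffer is duplicated before being written to the channel, letting the write window (position and limit) be set on the copy while the original continues to accumulate incoming data: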

public OperationResult write(SocketChannel ch) throws IOException {
    if (bufferList.size() == 0) {
        throw new IOException("Pipe is closed");
    }

    int numWritten = 0;

    while (true) {
        ByteBuffer readBuf = bufferList.get(currentWriteBufIndex);
        ByteBuffer writeBuffer = readBuf.duplicate();
        writeBuffer.position(writtenPos);
        writeBuffer.limit(readBuf.position());

        numWritten = ch.write(writeBuffer);

        writtenPos += numWritten;
        totalNumWritten += numWritten;

        if (writeBuffer.hasRemaining()) {
            return OperationResult.partially;
        }

        //LOG.info("totalNumWritten : " + totalNumWritten + ", totalNumRead : " + totalNumRead);
        if (totalNumWritten < totalNumRead) {
            if (currentWriteBufIndex < currentReadBufIndex) {
                currentWriteBufIndex++;
                writtenPos = 0;
            } else {
                return OperationResult.partially;
            }
        } else {
            return OperationResult.completed;
        }
    }
}

From source file:org.cloudata.core.commitlog.pipe.Message.java
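A small helper that duplicates a buffer and flips the copy when it has been fully written (position equals limit), leaving the original untouched: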

private ByteBuffer duplicate(ByteBuffer buf) {
    // LOG.debug("before pos : " + buf.position());
    // LOG.debug("before limit : " + buf.limit());

    ByteBuffer ret = buf.duplicate();

    if (ret.position() == ret.limit()) {
        ret.flip();
    }

    // LOG.debug("after pos : " + ret.position());
    // LOG.debug("after limit : " + ret.limit());

    return ret;
}

From source file:org.commoncrawl.service.queryserver.master.S3Helper.java
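Below, scanForGZIPHeader consumes a throwaway duplicate of the first buffer, so the original can then be repositioned to the discovered GZIP header offset: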

public static ArcFileItem retrieveArcFileItem(ArchiveInfo archiveInfo, EventLoop eventLoop) throws IOException {

    // the default bucket id 
    String bucketId = "commoncrawl-crawl-002";

    //ok, see if we need to switch buckets 
    if (archiveInfo.getCrawlNumber() == 1) {
        bucketId = "commoncrawl";
    }

    S3Downloader downloader = new S3Downloader(bucketId, "", "", false);

    // now activate the segment log ... 
    final Semaphore downloadCompleteSemaphore = new Semaphore(0);
    final StreamingArcFileReader arcFileReader = new StreamingArcFileReader(false);
    //arcFileReader.setArcFileHasHeaderItemFlag(false);

    // create a buffer list we will append incoming content into ... 
    final LinkedList<ByteBuffer> bufferList = new LinkedList<ByteBuffer>();

    downloader.initialize(new S3Downloader.Callback() {

        @Override
        public boolean contentAvailable(int itemId, String itemKey, NIOBufferList contentBuffer) {
            LOG.info("ContentQuery contentAvailable called for Item:" + itemKey + " totalBytesAvailable:"
                    + contentBuffer.available());

            try {
                while (contentBuffer.available() != 0) {
                    bufferList.add(contentBuffer.read());
                }
                return true;
            } catch (IOException e) {
                LOG.error(CCStringUtils.stringifyException(e));
                return false;
            }
        }

        @Override
        public void downloadComplete(int itemId, String itemKey) {
            LOG.info("S3 Download Complete for item:" + itemKey);
            downloadCompleteSemaphore.release();
        }

        @Override
        public void downloadFailed(int itemId, String itemKey, String errorCode) {
            LOG.info("S3 Download Failed for item:" + itemKey);
            downloadCompleteSemaphore.release();
        }

        @Override
        public boolean downloadStarting(int itemId, String itemKey, int contentLength) {
            LOG.info("ContentQuery DownloadStarting for Item:" + itemKey + " contentLength:" + contentLength);
            return true;
        }

    }, eventLoop);

    LOG.info("Starting request for Item:"
            + hdfsNameToS3ArcFileName(archiveInfo.getArcfileDate(), archiveInfo.getArcfileIndex()) + " Offset:"
            + archiveInfo.getArcfileOffset());

    int sizeToRetrieve = (archiveInfo.getCompressedSize() != 0) ? archiveInfo.getCompressedSize() : 30000;
    sizeToRetrieve += 10;

    downloader.fetchPartialItem(
            hdfsNameToS3ArcFileName(archiveInfo.getArcfileDate(), archiveInfo.getArcfileIndex()),
            archiveInfo.getArcfileOffset() - 10, sizeToRetrieve);
    downloadCompleteSemaphore.acquireUninterruptibly();

    if (bufferList.size() == 0) {
        return null;
    }

    ByteBuffer firstBuffer = bufferList.getFirst();
    if (firstBuffer != null) {
        int offsetToGZIPHeader = scanForGZIPHeader(firstBuffer.duplicate());
        if (offsetToGZIPHeader != -1) {
            firstBuffer.position(offsetToGZIPHeader);
            LOG.info("*** Offset to GZIP Header:" + offsetToGZIPHeader);
        } else {
            LOG.error("*** Failed to find GZIP Header offset");
        }
    }

    // now try to decode content if possible
    for (ByteBuffer buffer : bufferList) {
        LOG.info("Adding Buffer of Size:" + buffer.remaining() + " Position:" + buffer.position() + " Limit:"
                + buffer.limit());
        arcFileReader.available(buffer);
    }

    ArcFileItem item = arcFileReader.getNextItem();

    if (item != null) {
        LOG.info("Request Returned item:" + item.getUri());
        LOG.info("Uncompressed Size:" + item.getContent().getCount());
    }
    return item;
}

From source file:org.grouplens.lenskit.data.dao.packed.BinaryIndexTable.java
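The duplicate captures the table's data region: the copy is limited to the end of the offset data, the original buffer is advanced past it, and the duplicate is exposed as an IntBuffer: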

/**
 * Create a binary index table.
 * @param nentries The number of entries in the table.
 * @param buffer The table buffer.  Its position will be advanced to the end of the table.
 * @return The index table.
 */
public static BinaryIndexTable fromBuffer(int nentries, ByteBuffer buffer) {
    logger.debug("reading table of {} entries", nentries);
    long[] keys = new long[nentries];
    int[] offsets = new int[nentries];
    int[] sizes = new int[nentries];
    int nextExpectedOffset = 0;
    for (int i = 0; i < nentries; i++) {
        keys[i] = buffer.getLong();
        if (i > 0 && keys[i - 1] >= keys[i]) {
            logger.error("key {} is not greater than previous key {}", keys[i], keys[i - 1]);
            throw new IllegalArgumentException("corrupted index table");
        }
        offsets[i] = buffer.getInt();
        sizes[i] = buffer.getInt();
        if (offsets[i] != nextExpectedOffset) {
            logger.error("expected offset {}, got {}", nextExpectedOffset, offsets[i]);
            throw new IllegalArgumentException("corrupted index table");
        }
        nextExpectedOffset += sizes[i];
    }
    if (buffer.remaining() < nextExpectedOffset) {
        throw new IllegalArgumentException("buffer not large enough");
    }
    int end = buffer.position() + nextExpectedOffset * 4;
    ByteBuffer dup = buffer.duplicate();
    dup.limit(end);
    buffer.position(end);
    LongKeyDomain dom = LongKeyDomain.wrap(keys, keys.length, true);
    return new BinaryIndexTable(dom, offsets, sizes, dup.asIntBuffer());
}