Example usage for java.nio ByteBuffer limit

List of usage examples for java.nio ByteBuffer limit

Introduction

On this page you can find example usages of java.nio ByteBuffer limit.

Prototype

public final int limit() 

Source Link

Document

Returns the limit of this buffer.
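
A minimal, self-contained sketch (not taken from any of the source files below) showing how the limit is typically queried with limit() and narrowed with limit(int); the class name is only illustrative:

import java.nio.ByteBuffer;

public class ByteBufferLimitDemo {
    public static void main(String[] args) {
        ByteBuffer buffer = ByteBuffer.allocate(16); // capacity 16, limit 16, position 0
        buffer.put(new byte[] { 1, 2, 3, 4 });       // position advances to 4

        buffer.flip();                               // limit becomes the old position (4), position resets to 0
        System.out.println("limit after flip: " + buffer.limit());  // 4
        System.out.println("remaining: " + buffer.remaining());     // 4

        buffer.limit(2);                             // shrink the readable window to the first 2 bytes
        byte[] dst = new byte[buffer.limit()];
        buffer.get(dst);                             // reads exactly limit() bytes
        System.out.println("bytes read: " + dst.length);            // 2
    }
}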

Usage

From source file:org.jnetstream.filter.bpf.BPFFilter.java

/**
 * @throws FilterNotFoundException
 *           if a program could not be generated or found for the specified
 *           target
 * @see com.slytechs.capture.filter.Filter#execute(java.nio.ByteBuffer)
 */
public long execute(final ByteBuffer buffer, final FilterTarget target) throws FilterException {

    BPFProgram program = map.get(target);

    if (program == null) {
        program = compile(target);
        map.put(target, program);
    }

    try {

        /*
         * Pick either the static program or use the expression to compile to BPF
         * program for each different target. We rely on expression object to
         * cache filters, of course.
         */
        final BPFProgram p;
        if (expression != null) {
            p = expression.compile(target);

        } else if (program != null && this.target == target) {
            p = program;

        } else {
            return 0; // Automatic rejection, no filter specified for this type of target
        }

        final long length = BpfFactory.getForThread().execute(p, buffer, 0, buffer.limit());

        return length;

    } catch (final Exception e) {
        logger.error("Invalid BPF instruction encountered", e);
        throw new IllegalStateException("Invalid BPF instruction", e);
    }
}

From source file:ome.io.nio.RomioPixelBuffer.java

/**
 * Implemented as specified by {@link PixelBuffer} I/F.
 * @see PixelBuffer#setStack(ByteBuffer, Integer, Integer, Integer)
 */
public void setStack(ByteBuffer buffer, Integer z, Integer c, Integer t)
        throws IOException, DimensionsOutOfBoundsException {
    throwIfReadOnly();
    Long offset = getStackOffset(c, t);
    Integer size = getStackSize();
    if (buffer.limit() != size) {
        // Handle the size mismatch.
        if (buffer.limit() < size)
            throw new BufferUnderflowException();
        throw new BufferOverflowException();
    }

    setRegion(size, offset, buffer);
}

From source file:ome.io.nio.RomioPixelBuffer.java

/**
 * Implemented as specified by {@link PixelBuffer} I/F.
 * @see PixelBuffer#setPlane(ByteBuffer, Integer, Integer, Integer)
 */
public void setPlane(ByteBuffer buffer, Integer z, Integer c, Integer t)
        throws IOException, DimensionsOutOfBoundsException {
    throwIfReadOnly();
    Long offset = getPlaneOffset(z, c, t);
    Integer size = getPlaneSize();
    if (buffer.limit() != size) {
        // Handle the size mismatch.
        if (buffer.limit() < size)
            throw new BufferUnderflowException();
        throw new BufferOverflowException();
    }

    setRegion(size, offset, buffer);
}

From source file:com.l2jfree.network.mmocore.ReadWriteThread.java

private void parseClientPacket(ByteBuffer buf, int dataSize, T client) {
    final int pos = buf.position();
    final DataSizeHolder dsh = getDataSizeHolder().init(dataSize);

    if (client.decipher(buf, dsh) && buf.hasRemaining()) {
        // remove useless bytes
        dsh.decreaseSize(dsh.getMinPadding());
        // calculate possibly remaining useless bytes
        final int maxPossiblePadding = dsh.getMaxPadding() - dsh.getMinPadding();

        // apply limit
        final int limit = buf.limit();
        buf.limit(pos + dsh.getSize());

        final int opcode = buf.get() & 0xFF;

        if (getMMOController().canReceivePacketFrom(client, opcode)) {
            RP cp = getPacketHandler().handlePacket(buf, client, opcode);

            if (cp != null) {
                System.out.println("READ: " + client.getState() + " " + cp.getClass().getSimpleName());

                // remove useless bytes #2, using packet specs
                int maxLeftoverPadding = maxPossiblePadding;
                final int overflow = buf.remaining() - cp.getMaximumLength();
                if (maxPossiblePadding > 0 && // there may be useless bytes
                        overflow > 0) // and we have too much
                {
                    // avoid any damage to the packet body
                    final int removable = Math.min(overflow, maxPossiblePadding);
                    buf.limit(buf.limit() - removable);
                    maxLeftoverPadding -= removable;
                }

                getMmoBuffer().setByteBuffer(buf);
                cp.setClient(client);

                try {
                    if (getMmoBuffer().getAvailableBytes() < cp.getMinimumLength()) {
                        getMMOController().report(ErrorMode.BUFFER_UNDER_FLOW, client, cp, null);
                    } else if (getMmoBuffer().getAvailableBytes() > cp.getMaximumLength()) {
                        getMMOController().report(ErrorMode.BUFFER_OVER_FLOW, client, cp, null);
                    } else {
                        cp.read(getMmoBuffer());

                        client.executePacket(cp);

                        if (buf.hasRemaining() && // some unused data, a bad sign
                                buf.remaining() > maxLeftoverPadding) // and definitely not padded bytes
                        {
                            // FIXME disabled until packet structures updated properly
                            //report(ErrorMode.BUFFER_OVER_FLOW, client, cp, null);

                            MMOController._log.info("Invalid packet format (buf: " + buf + ", dataSize: "
                                    + dataSize + ", pos: " + pos + ", limit: " + limit + ", opcode: 0x"
                                    + HexUtil.fillHex(opcode, 2) + ") used for reading - " + client + " - "
                                    + cp.getType() + " - " + getMMOController().getVersionInfo());
                        }
                    }
                } catch (BufferUnderflowException e) {
                    getMMOController().report(ErrorMode.BUFFER_UNDER_FLOW, client, cp, e);
                } catch (RuntimeException e) {
                    getMMOController().report(ErrorMode.FAILED_READING, client, cp, e);
                }

                getMmoBuffer().setByteBuffer(null);
            }
        }

        buf.limit(limit);
    }
}

From source file:org.apache.hadoop.hive.ql.exec.vector.expressions.TestVectorTimestampExpressions.java

private byte[] encodeTime(Timestamp timestamp) {
    ByteBuffer encoded;
    long time = timestamp.getTime();
    try {
        String formatted = dateFormat.format(new Date(time));
        encoded = Text.encode(formatted);
    } catch (CharacterCodingException e) {
        throw new RuntimeException(e);
    }
    return Arrays.copyOf(encoded.array(), encoded.limit());
}

From source file:net.jenet.Host.java

/**
 * @param buffer the buffer to receive data into
 * @return the number of bytes received, or -1 on error
 */
int receive(ByteBuffer buffer) {
    try {
        buffer.clear();
        receivedAddress = (InetSocketAddress) communicationChannel.receive(buffer);
        buffer.flip();
        if (receivedAddress != null)
            LOG.debug("Host.receive:" + address + ". Received " + buffer.limit() + " bytes  from "
                    + receivedAddress);
        return buffer.limit();
    } catch (Exception e) {
        LOG.error("Host.receive: Error reading buffers.", e);
        return -1;
    }
}

From source file:org.commoncrawl.util.S3Uploader.java

public boolean read(NIOBufferList dataBuffer) throws IOException {

    ByteBuffer buffer = null;

    if ((buffer = _writeBuffer.read()) != null) {
        _bytesUploaded += buffer.remaining();
        BandwidthStats stats = new BandwidthStats();
        _rateLimiter.getStats(stats);
        System.out.println("[" + _slot + "]ID:" + _Id + " read Callback for S3Uploader for Path:"
                + _uploadTarget.getName() + " returned:" + buffer.remaining() + " Bytes TotalBytesRead:"
                + _bytesUploaded + " Rate:" + stats.scaledBitsPerSecond + " " + stats.scaledBitsUnits);
        buffer.position(buffer.limit());
        dataBuffer.write(buffer);
        dataBuffer.flush();
    }

    boolean eof = false;

    synchronized (_writeBuffer) {
        eof = _writeBuffer.available() == 0 && _loadComplete;
    }

    return eof;
}

From source file:org.alfresco.contentstore.patch.PatchServiceImpl.java

@Override
public void updatePatchDocument(PatchDocument patchDocument, NodeChecksums checksums, ByteBuffer data) {
    int blockSize = checksums.getBlockSize();

    patchDocument.setBlockSize(blockSize);

    int i = 0;

    Adler32 adlerInfo = new Adler32(hasher);
    int lastMatchIndex = 0;
    ByteBuffer currentPatch = ByteBuffer.allocate(600000); // TODO

    int currentPatchSize = 0;

    for (;;) {
        int chunkSize = 0;
        // determine the size of the next data chunk to evaluate. Default to
        // blockSize, but clamp to the end of the data
        if ((i + blockSize) > data.limit()) {
            chunkSize = data.limit() - i;
            adlerInfo.reset(); // need to reset this because the rolling
                               // checksum doesn't work correctly on a final
                               // non-aligned block
        } else {
            chunkSize = blockSize;
        }

        int matchedBlock = adlerInfo.checkMatch(lastMatchIndex, checksums, data, i, i + chunkSize - 1);
        if (matchedBlock != -1) {
            //                try
            //                {
            //                    String y = hasher.md5(data, i, i + chunkSize - 1);
            //                    System.out.println("y = " + y);
            //                }
            //                catch (NoSuchAlgorithmException e)
            //                {
            //                    // TODO Auto-generated catch block
            //                    e.printStackTrace();
            //                }
            // if we have a match, do the following:
            // 1) add the matched block index to our tracking buffer
            // 2) check to see if there's a current patch. If so, add it to
            // the patch document.
            // 3) jump forward blockSize bytes and continue
            patchDocument.addMatchedBlock(matchedBlock);

            if (currentPatchSize > 0) {
                // there are outstanding patches, add them to the list
                // create the patch and append it to the patches buffer
                currentPatch.flip();
                int size = currentPatch.limit();
                byte[] dst = new byte[size];
                currentPatch.get(dst, 0, size);
                Patch patch = new Patch(lastMatchIndex, size, dst);
                patchDocument.addPatch(patch);
                currentPatch.clear();
            }

            lastMatchIndex = matchedBlock;

            i += chunkSize;

            adlerInfo.reset();

            continue;
        } else {
            // while we don't have a block match, append bytes to the
            // current patch
            logger.debug("limit = " + currentPatch.limit() + ", position = " + currentPatch.position());
            currentPatch.put(data.get(i));
            currentPatchSize++;
        }
        if (i >= data.limit() - 1) {
            break;
        }
        i++;
    } // end for each byte in the data

    if (currentPatchSize > 0) {
        currentPatch.flip();
        int size = currentPatch.limit();
        byte[] dst = new byte[size];
        currentPatch.get(dst, 0, size);
        Patch patch = new Patch(lastMatchIndex, size, dst);
        patchDocument.addPatch(patch);
    }
}

From source file:fuse.okuyamafs.OkuyamaFilesystem.java

public int write(String path, Object fh, boolean isWritepage, ByteBuffer buf, long offset)
        throws FuseException {
    log.info("write  path:" + path + " offset:" + offset + " isWritepage:" + isWritepage + " buf.limit:"
            + buf.limit());
    //long startAA = System.nanoTime();
    try {

        if (startTimeAAA == 0L)
            startTimeAAA = System.nanoTime();
        // For storage type 1, bypass the append buffer and write immediately
        if (OkuyamaFilesystem.storageType == 1)
            return realWrite(path, fh, isWritepage, buf, offset);

        if (fh == null)
            return Errno.EBADE;

        synchronized (this.parallelDataAccessSync[((path.hashCode() << 1) >>> 1) % 100]) {

            if (appendWriteDataBuf.containsKey(fh)) {

                Map appendData = (Map) appendWriteDataBuf.get(fh);
                ByteArrayOutputStream bBuf = (ByteArrayOutputStream) appendData.get("buf");
                long bOffset = ((Long) appendData.get("offset")).longValue();

                if ((bOffset + bBuf.size()) == offset) {

                    byte[] tmpBuf = new byte[buf.limit()];
                    buf.get(tmpBuf);
                    bBuf.write(tmpBuf);

                    // Once the buffered data reaches the write buffer size, flush it with a real write
                    if (bBuf.size() >= writeBufferSize) {

                        // Remove the pending buffer and write it out
                        appendWriteDataBuf.remove(fh);
                        String bPath = (String) appendData.get("path");
                        Object bFh = (Object) appendData.get("fh");
                        boolean bIsWritepage = ((Boolean) appendData.get("isWritepage")).booleanValue();

                        int ret = this.realWrite(bPath, bFh, bIsWritepage, bBuf, bOffset);

                        return ret;
                    } else {

                        return 0;
                    }
                } else {

                    // The offset is not contiguous with the buffered data, so flush the pending buffer first
                    appendWriteDataBuf.remove(fh);
                    String bPath = (String) appendData.get("path");
                    Object bFh = (Object) appendData.get("fh");
                    boolean bIsWritepage = ((Boolean) appendData.get("isWritepage")).booleanValue();

                    int realWriteRet = this.realWrite(bPath, bFh, bIsWritepage, bBuf, bOffset);

                    if (realWriteRet == 0) {

                        int retI = this.realWrite(path, fh, isWritepage, buf, offset);

                        return retI;
                    } else {
                        return realWriteRet;
                    }
                }
            } else {

                Map appendData = new HashMap();
                appendData.put("path", path);
                appendData.put("fh", fh);
                appendData.put("isWritepage", isWritepage);
                ByteArrayOutputStream baos = new ByteArrayOutputStream(1024 * 1024 * 10 + 8192);
                byte[] tmpByte = new byte[buf.limit()];
                buf.get(tmpByte);
                baos.write(tmpByte);
                appendData.put("buf", baos);
                appendData.put("offset", offset);
                this.appendWriteDataBuf.put(fh, appendData);
                this.writeBufFpMap.addGroupingData(path, fh);

                return 0;
            }
        }
    } catch (Exception e) {

        throw new FuseException(e);
    } finally {
    }
}

From source file:com.flexive.core.stream.BinaryUploadProtocol.java

/**
 * {@inheritDoc}
 */
@Override
public synchronized boolean receiveStream(ByteBuffer buffer) throws IOException {
    if (!buffer.hasRemaining()) {
        //this can only happen on remote clients
        if (LOG.isDebugEnabled())
            LOG.debug("aborting (empty)");
        return false;
    }
    if (!rcvStarted) {
        rcvStarted = true;
        if (LOG.isDebugEnabled())
            LOG.debug("(internal serverside) receive start");
        try {
            pout = getContentStorage().receiveTransitBinary(division, handle, mimeType, expectedLength,
                    timeToLive);
        } catch (SQLException e) {
            LOG.error("SQL Error trying to receive binary stream: " + e.getMessage(), e);
        } catch (FxNotFoundException e) {
            LOG.error("Failed to lookup content storage for division #" + division + ": "
                    + e.getLocalizedMessage());
        }
    }
    if (LOG.isDebugEnabled() && count + buffer.remaining() > expectedLength) {
        LOG.debug("poss. overflow: pos=" + buffer.position() + " lim=" + buffer.limit() + " cap="
                + buffer.capacity());
        LOG.debug("Curr count: " + count + " count+rem="
                + (count + buffer.remaining() + " delta:" + ((count + buffer.remaining()) - expectedLength)));
    }
    count += buffer.remaining();
    pout.write(buffer.array(), buffer.position(), buffer.remaining());
    buffer.clear();
    if (expectedLength > 0 && count >= expectedLength) {
        if (LOG.isDebugEnabled())
            LOG.debug("aborting");
        return false;
    }
    return true;
}