Example usage for java.nio.ByteBuffer.remaining()

List of usage examples for java.nio.ByteBuffer.remaining()

Introduction

On this page you can find usage examples for java.nio.ByteBuffer.remaining().

Prototype

public final int remaining() 

Document

Returns the number of elements remaining in this buffer, that is, limit - position.
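
As a quick, self-contained illustration of that contract, the following sketch (not taken from any of the projects below; the class name RemainingDemo is only for the example) shows how remaining() tracks limit - position as a buffer is written, flipped, and read:

import java.nio.ByteBuffer;

public class RemainingDemo {
    public static void main(String[] args) {
        // freshly allocated buffer: position = 0, limit = capacity = 8
        ByteBuffer buf = ByteBuffer.allocate(8);
        System.out.println(buf.remaining()); // 8

        // writing advances the position, shrinking the writable space
        buf.put((byte) 1).put((byte) 2).put((byte) 3);
        System.out.println(buf.remaining()); // 5

        // flip() sets limit = position and position = 0, ready for reading
        buf.flip();
        System.out.println(buf.remaining()); // 3 bytes available to read

        buf.get();
        System.out.println(buf.remaining()); // 2
    }
}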

Usage

From source file: org.apache.hadoop.hdfs.DFSInputStream.java

@Override
public synchronized int read(final ByteBuffer buf) throws IOException {
    ReaderStrategy byteBufferReader = new ByteBufferStrategy(buf);
    try (TraceScope ignored = dfsClient.newPathTraceScope("DFSInputStream#byteBufferRead", src)) {
        return readWithStrategy(byteBufferReader, 0, buf.remaining());
    }
}

From source file: org.commoncrawl.io.NIOHttpConnection.java

public int Readable(NIOClientSocket theSocket) throws IOException {

    if (!theSocket.isOpen()) {
        LOG.error("Connection:[" + getId() + "] Readable Called on Closed Socket");
        return -1;
    }

    int totalBytesRead = 0;
    int singleReadAmount = 0;
    boolean overflow = false;
    boolean disconnected = false;

    try {

        if (_downloadMax == -1 || _totalRead < _downloadMax) {

            do {

                ByteBuffer buffer = _inBuf.getWriteBuf();

                if (_downloadMax != -1) {
                    if (_totalRead + buffer.remaining() > _downloadMax) {
                        int overflowAmt = (_totalRead + buffer.remaining()) - _downloadMax;
                        buffer.limit(buffer.limit() - overflowAmt);
                    }
                }

                singleReadAmount = _socket.read(buffer);

                if (singleReadAmount > 0) {
                    _inBuf.write(buffer);
                    _totalRead += singleReadAmount;
                    _cumilativeRead += singleReadAmount;
                    totalBytesRead += singleReadAmount;
                }

            } while (singleReadAmount > 0 && (_downloadMax == -1 || _totalRead < _downloadMax));

            if (_downloadMax != -1 && _totalRead == _downloadMax) {
                overflow = true;
                _contentTruncated = true;
            }
        }

        if (totalBytesRead > 0) {
            // flush any written buffers.
            _inBuf.flush();
            // process incoming buffer
            processIncomingData(totalBytesRead);
        }

        if (singleReadAmount == -1 || overflow) {

            disconnected = true;

            if (getState() == State.RECEIVING_CONTENT
                    && (overflow || _contentLength == -1 || _contentLength == _downloadedContentLength)) {

                // if we are still in the middle of processing chunked data ...
                if (_chunked) {
                    // clear out existing input buffer ...
                    _inBuf.reset();
                    // and if a chunk buffer is available ...
                    if (_chunkContentBuffer != null) {
                        // take what we can get ...

                        // flush chunk buffer ...
                        _chunkContentBuffer.flush();
                        // and swap it with the real content buffer ...
                        _inBuf = _chunkContentBuffer;
                        // reset chunk state ...
                        _chunkContentBuffer = null;
                    }
                    // reset chunked flag ...
                    _chunked = false;

                    // and now, if this is NOT an overflow condition ...
                    if (!overflow) {
                        // interpret this as an error ...
                        setErrorType(ErrorType.IOEXCEPTION);
                        setErrorDesc("Connection Closed Before Receiving Chunk Trailer");
                        setState(State.ERROR, new java.net.SocketException());
                    }
                }

                // now check one more time if we are in the proper state ...
                if (getState() == State.RECEIVING_CONTENT) {
                    setState(State.DONE, null);
                }
            } else if (getState() != State.DONE) {
                if (getState() == State.SENDING_REQUEST) {
                    LOG.warn("Connection:[" + getId() + "] URL:" + _url
                            + " POSSIBLE TRUNCATION: Read returned -1 with ContentLength:" + _contentLength
                            + " BufferSize:" + _inBuf.available() + " DownloadSize:" + _downloadedContentLength
                            + " PrevState:" + getState() + " Sent:" + _totalWritten + " OutBufDataAvail:"
                            + _outBuf.available() + " Context:" + _context);
                    setState(State.RECEIVING_HEADERS, null);
                    processIncomingData(0);
                } else if (getState() == State.RECEIVING_CONTENT && _downloadedContentLength != 0) {
                    LOG.warn("Connection:[" + getId() + "] URL:" + _url
                            + " POSSIBLE TRUNCATION: Read returned -1 with ContentLength:" + _contentLength
                            + " BufferSize:" + _inBuf.available() + " DownloadSize:" + _downloadedContentLength
                            + " State:" + getState() + "Context:" + _context);
                    setState(State.DONE, null);
                } else {
                    LOG.error("Connection:[" + getId() + "] URL:" + _url
                            + " Read returned -1 with ContentLength:" + _contentLength + " BufferSize:"
                            + _inBuf.available() + " DownloadSize:" + _downloadedContentLength + " State:"
                            + getState() + "Context:" + _context);

                    setErrorType(ErrorType.IOEXCEPTION);
                    setErrorDesc("Read returned -1 with ContentLength:" + _contentLength + " BufferSize:"
                            + _inBuf.available() + " DownloadSize:" + _downloadedContentLength + " State:"
                            + getState());
                    setState(State.ERROR, new java.net.SocketException());
                }
            }
        }
    } catch (IOException e) {
        LOG.error("Connection:[" + getId() + "] Readable for url:" + getURL() + " threw Exception:"
                + e.getMessage());

        setErrorType(ErrorType.IOEXCEPTION);
        setErrorDesc(StringUtils.stringifyException(e));

        setState(State.ERROR, e);

    }
    if (_socket.isOpen()) {
        // if we have data to write ...
        if (_outBuf.isDataAvailable()) {
            _selector.registerForReadAndWrite(theSocket);
        } else {
            _selector.registerForRead(theSocket);
        }
    }

    if (totalBytesRead > 0) {
        // update last read time ...
        _lastReadOrWriteTime = System.currentTimeMillis();
    }

    return (disconnected) ? -1 : totalBytesRead;
}

From source file: org.apache.cassandra.db.compaction.CompactionManager.java

private void scrubOne(ColumnFamilyStore cfs, SSTableReader sstable) throws IOException {
    logger.info("Scrubbing " + sstable);
    CompactionController controller = new CompactionController(cfs, Collections.singletonList(sstable),
            getDefaultGcBefore(cfs), true);
    boolean isCommutative = cfs.metadata.getDefaultValidator().isCommutative();

    // Calculate the expected compacted filesize
    String compactionFileLocation = cfs.table.getDataFileLocation(sstable.length());
    if (compactionFileLocation == null)
        throw new IOException("disk full");
    int expectedBloomFilterSize = Math.max(DatabaseDescriptor.getIndexInterval(),
            (int) (SSTableReader.getApproximateKeyCount(Arrays.asList(sstable))));

    // loop through each row, deserializing to check for damage.
    // we'll also loop through the index at the same time, using the position from the index to recover if the
    // row header (key or data size) is corrupt. (This means our position in the index file will be one row
    // "ahead" of the data file.)
    final BufferedRandomAccessFile dataFile = BufferedRandomAccessFile
            .getUncachingReader(sstable.getFilename());
    String indexFilename = sstable.descriptor.filenameFor(Component.PRIMARY_INDEX);
    BufferedRandomAccessFile indexFile = BufferedRandomAccessFile.getUncachingReader(indexFilename);
    try {
        ByteBuffer nextIndexKey = ByteBufferUtil.readWithShortLength(indexFile);
        {
            // throw away variable so we don't have a side effect in the assert
            long firstRowPositionFromIndex = indexFile.readLong();
            assert firstRowPositionFromIndex == 0 : firstRowPositionFromIndex;
        }

        SSTableWriter writer = maybeCreateWriter(cfs, compactionFileLocation, expectedBloomFilterSize, null,
                Collections.singletonList(sstable));
        executor.beginCompaction(new ScrubInfo(dataFile, sstable));
        int goodRows = 0, badRows = 0, emptyRows = 0;

        while (!dataFile.isEOF()) {
            long rowStart = dataFile.getFilePointer();
            if (logger.isDebugEnabled())
                logger.debug("Reading row at " + rowStart);

            DecoratedKey key = null;
            long dataSize = -1;
            try {
                key = SSTableReader.decodeKey(sstable.partitioner, sstable.descriptor,
                        ByteBufferUtil.readWithShortLength(dataFile));
                dataSize = sstable.descriptor.hasIntRowSize ? dataFile.readInt() : dataFile.readLong();
                if (logger.isDebugEnabled())
                    logger.debug(
                            String.format("row %s is %s bytes", ByteBufferUtil.bytesToHex(key.key), dataSize));
            } catch (Throwable th) {
                throwIfFatal(th);
                // check for null key below
            }

            ByteBuffer currentIndexKey = nextIndexKey;
            long nextRowPositionFromIndex;
            try {
                nextIndexKey = indexFile.isEOF() ? null : ByteBufferUtil.readWithShortLength(indexFile);
                nextRowPositionFromIndex = indexFile.isEOF() ? dataFile.length() : indexFile.readLong();
            } catch (Throwable th) {
                logger.warn("Error reading index file", th);
                nextIndexKey = null;
                nextRowPositionFromIndex = dataFile.length();
            }

            long dataStart = dataFile.getFilePointer();
            long dataStartFromIndex = currentIndexKey == null ? -1
                    : rowStart + 2 + currentIndexKey.remaining() + (sstable.descriptor.hasIntRowSize ? 4 : 8);
            long dataSizeFromIndex = nextRowPositionFromIndex - dataStartFromIndex;
            assert currentIndexKey != null || indexFile.isEOF();
            if (logger.isDebugEnabled() && currentIndexKey != null)
                logger.debug(String.format("Index doublecheck: row %s is %s bytes",
                        ByteBufferUtil.bytesToHex(currentIndexKey), dataSizeFromIndex));

            writer.mark();
            try {
                if (key == null)
                    throw new IOError(new IOException("Unable to read row key from data file"));
                if (dataSize > dataFile.length())
                    throw new IOError(new IOException("Impossible row size " + dataSize));
                SSTableIdentityIterator row = new SSTableIdentityIterator(sstable, dataFile, key, dataStart,
                        dataSize, true);
                AbstractCompactedRow compactedRow = controller.getCompactedRow(row);
                if (compactedRow.isEmpty()) {
                    emptyRows++;
                } else {
                    writer.append(compactedRow);
                    goodRows++;
                }
                if (!key.key.equals(currentIndexKey) || dataStart != dataStartFromIndex)
                    logger.warn("Index file contained a different key or row size; using key from data file");
            } catch (Throwable th) {
                throwIfFatal(th);
                logger.warn("Non-fatal error reading row (stacktrace follows)", th);
                writer.reset();

                if (currentIndexKey != null && (key == null || !key.key.equals(currentIndexKey)
                        || dataStart != dataStartFromIndex || dataSize != dataSizeFromIndex)) {
                    logger.info(String.format("Retrying from row index; data is %s bytes starting at %s",
                            dataSizeFromIndex, dataStartFromIndex));
                    key = SSTableReader.decodeKey(sstable.partitioner, sstable.descriptor, currentIndexKey);
                    try {
                        SSTableIdentityIterator row = new SSTableIdentityIterator(sstable, dataFile, key,
                                dataStartFromIndex, dataSizeFromIndex, true);
                        AbstractCompactedRow compactedRow = controller.getCompactedRow(row);
                        if (compactedRow.isEmpty()) {
                            emptyRows++;
                        } else {
                            writer.append(compactedRow);
                            goodRows++;
                        }
                    } catch (Throwable th2) {
                        throwIfFatal(th2);
                        // Skipping rows is dangerous for counters (see CASSANDRA-2759)
                        if (isCommutative)
                            throw new IOError(th2);

                        logger.warn("Retry failed too.  Skipping to next row (retry's stacktrace follows)",
                                th2);
                        writer.reset();
                        dataFile.seek(nextRowPositionFromIndex);
                        badRows++;
                    }
                } else {
                    // Skipping rows is dangerous for counters (see CASSANDRA-2759)
                    if (isCommutative)
                        throw new IOError(th);

                    logger.warn("Row at " + dataStart + " is unreadable; skipping to next");
                    if (currentIndexKey != null)
                        dataFile.seek(nextRowPositionFromIndex);
                    badRows++;
                }
            }
        }

        if (writer.getFilePointer() > 0) {
            SSTableReader newSstable = writer.closeAndOpenReader(sstable.maxDataAge);
            cfs.replaceCompactedSSTables(Arrays.asList(sstable), Arrays.asList(newSstable));
            logger.info("Scrub of " + sstable + " complete: " + goodRows + " rows in new sstable and "
                    + emptyRows + " empty (tombstoned) rows dropped");
            if (badRows > 0)
                logger.warn("Unable to recover " + badRows
                        + " rows that were skipped.  You can attempt manual recovery from the pre-scrub snapshot.  You can also run nodetool repair to transfer the data from a healthy replica, if any");
        } else {
            cfs.markCompacted(Arrays.asList(sstable));
            if (badRows > 0)
                logger.warn("No valid rows found while scrubbing " + sstable
                        + "; it is marked for deletion now. If you want to attempt manual recovery, you can find a copy in the pre-scrub snapshot");
            else
                logger.info("Scrub of " + sstable + " complete; looks like all " + emptyRows
                        + " rows were tombstoned");
        }
    } finally {
        FileUtils.closeQuietly(dataFile);
        FileUtils.closeQuietly(indexFile);
    }
}

From source file: org.apache.cassandra.db.CompactionManager.java

/**
 * Deserialize everything in the CFS and re-serialize w/ the newest version.  Also attempts to recover
 * from bogus row keys / sizes using data from the index, and skips rows with garbage columns that resulted
 * from early ByteBuffer bugs.
 *
 * @throws IOException
 */
private void doScrub(ColumnFamilyStore cfs, Collection<SSTableReader> sstables) throws IOException {
    assert !cfs.isIndex();

    for (final SSTableReader sstable : sstables) {
        logger.info("Scrubbing " + sstable);

        // Calculate the expected compacted filesize
        String compactionFileLocation = cfs.table.getDataFileLocation(sstable.length());
        if (compactionFileLocation == null)
            throw new IOException("disk full");
        int expectedBloomFilterSize = Math.max(DatabaseDescriptor.getIndexInterval(),
                (int) (SSTableReader.getApproximateKeyCount(Arrays.asList(sstable))));

        // loop through each row, deserializing to check for damage.
        // we'll also loop through the index at the same time, using the position from the index to recover if the
        // row header (key or data size) is corrupt. (This means our position in the index file will be one row
        // "ahead" of the data file.)
        final BufferedRandomAccessFile dataFile = BufferedRandomAccessFile
                .getUncachingReader(sstable.getFilename());
        String indexFilename = sstable.descriptor.filenameFor(Component.PRIMARY_INDEX);
        BufferedRandomAccessFile indexFile = BufferedRandomAccessFile.getUncachingReader(indexFilename);
        ByteBuffer nextIndexKey = ByteBufferUtil.readWithShortLength(indexFile);
        {
            // throw away variable so we don't have a side effect in the assert
            long firstRowPositionFromIndex = indexFile.readLong();
            assert firstRowPositionFromIndex == 0 : firstRowPositionFromIndex;
        }

        SSTableWriter writer = maybeCreateWriter(cfs, compactionFileLocation, expectedBloomFilterSize, null,
                Collections.singletonList(sstable));
        executor.beginCompaction(new ScrubInfo(dataFile, sstable));
        int goodRows = 0, badRows = 0, emptyRows = 0;

        while (!dataFile.isEOF()) {
            long rowStart = dataFile.getFilePointer();
            if (logger.isDebugEnabled())
                logger.debug("Reading row at " + rowStart);

            DecoratedKey key = null;
            long dataSize = -1;
            try {
                key = SSTableReader.decodeKey(sstable.partitioner, sstable.descriptor,
                        ByteBufferUtil.readWithShortLength(dataFile));
                dataSize = sstable.descriptor.hasIntRowSize ? dataFile.readInt() : dataFile.readLong();
                if (logger.isDebugEnabled())
                    logger.debug(
                            String.format("row %s is %s bytes", ByteBufferUtil.bytesToHex(key.key), dataSize));
            } catch (Throwable th) {
                throwIfFatal(th);
                // check for null key below
            }

            ByteBuffer currentIndexKey = nextIndexKey;
            long nextRowPositionFromIndex;
            try {
                nextIndexKey = indexFile.isEOF() ? null : ByteBufferUtil.readWithShortLength(indexFile);
                nextRowPositionFromIndex = indexFile.isEOF() ? dataFile.length() : indexFile.readLong();
            } catch (Throwable th) {
                logger.warn("Error reading index file", th);
                nextIndexKey = null;
                nextRowPositionFromIndex = dataFile.length();
            }

            long dataStart = dataFile.getFilePointer();
            long dataStartFromIndex = currentIndexKey == null ? -1
                    : rowStart + 2 + currentIndexKey.remaining() + (sstable.descriptor.hasIntRowSize ? 4 : 8);
            long dataSizeFromIndex = nextRowPositionFromIndex - dataStartFromIndex;
            assert currentIndexKey != null || indexFile.isEOF();
            if (logger.isDebugEnabled() && currentIndexKey != null)
                logger.debug(String.format("Index doublecheck: row %s is %s bytes",
                        ByteBufferUtil.bytesToHex(currentIndexKey), dataSizeFromIndex));

            writer.mark();
            try {
                if (key == null)
                    throw new IOError(new IOException("Unable to read row key from data file"));
                if (dataSize > dataFile.length())
                    throw new IOError(new IOException("Impossible row size " + dataSize));
                SSTableIdentityIterator row = new SSTableIdentityIterator(sstable, dataFile, key, dataStart,
                        dataSize, true);
                AbstractCompactedRow compactedRow = getCompactedRow(row, sstable.descriptor, true);
                if (compactedRow.isEmpty()) {
                    emptyRows++;
                } else {
                    writer.append(compactedRow);
                    goodRows++;
                }
                if (!key.key.equals(currentIndexKey) || dataStart != dataStartFromIndex)
                    logger.warn(
                            "Row scrubbed successfully but index file contains a different key or row size; consider rebuilding the index as described in http://www.mail-archive.com/user@cassandra.apache.org/msg03325.html");
            } catch (Throwable th) {
                throwIfFatal(th);
                logger.warn("Non-fatal error reading row (stacktrace follows)", th);
                writer.reset();

                if (currentIndexKey != null && (key == null || !key.key.equals(currentIndexKey)
                        || dataStart != dataStartFromIndex || dataSize != dataSizeFromIndex)) {
                    logger.info(String.format("Retrying from row index; data is %s bytes starting at %s",
                            dataSizeFromIndex, dataStartFromIndex));
                    key = SSTableReader.decodeKey(sstable.partitioner, sstable.descriptor, currentIndexKey);
                    try {
                        SSTableIdentityIterator row = new SSTableIdentityIterator(sstable, dataFile, key,
                                dataStartFromIndex, dataSizeFromIndex, true);
                        AbstractCompactedRow compactedRow = getCompactedRow(row, sstable.descriptor, true);
                        if (compactedRow.isEmpty()) {
                            emptyRows++;
                        } else {
                            writer.append(compactedRow);
                            goodRows++;
                        }
                    } catch (Throwable th2) {
                        throwIfFatal(th2);
                        logger.warn("Retry failed too.  Skipping to next row (retry's stacktrace follows)",
                                th2);
                        writer.reset();
                        dataFile.seek(nextRowPositionFromIndex);
                        badRows++;
                    }
                } else {
                    logger.warn("Row at " + dataStart + " is unreadable; skipping to next");
                    if (currentIndexKey != null)
                        dataFile.seek(nextRowPositionFromIndex);
                    badRows++;
                }
            }
        }

        if (writer.getFilePointer() > 0) {
            SSTableReader newSstable = writer.closeAndOpenReader(sstable.maxDataAge);
            cfs.replaceCompactedSSTables(Arrays.asList(sstable), Arrays.asList(newSstable));
            logger.info("Scrub of " + sstable + " complete: " + goodRows + " rows in new sstable and "
                    + emptyRows + " empty (tombstoned) rows dropped");
            if (badRows > 0)
                logger.warn("Unable to recover " + badRows
                        + " rows that were skipped.  You can attempt manual recovery from the pre-scrub snapshot.  You can also run nodetool repair to transfer the data from a healthy replica, if any");
        } else {
            cfs.markCompacted(Arrays.asList(sstable));
            if (badRows > 0)
                logger.warn("No valid rows found while scrubbing " + sstable
                        + "; it is marked for deletion now. If you want to attempt manual recovery, you can find a copy in the pre-scrub snapshot");
            else
                logger.info("Scrub of " + sstable + " complete; looks like all " + emptyRows
                        + " rows were tombstoned");
        }
    }
}

From source file: com.mellanox.r4h.DFSInputStream.java

@Override
public synchronized int read(final ByteBuffer buf) throws IOException {
    ReaderStrategy byteBufferReader = new ByteBufferStrategy(buf);
    TraceScope scope = dfsClient.getPathTraceScope("DFSInputStream#byteBufferRead", src);
    try {
        return readWithStrategy(byteBufferReader, 0, buf.remaining());
    } finally {
        scope.close();
    }
}

From source file: org.apache.bookkeeper.bookie.Bookie.java

/**
 * Add entry to a ledger.
 * @throws BookieException.LedgerFencedException if the ledger is fenced
 */
public void addEntry(ByteBuffer entry, WriteCallback cb, Object ctx, byte[] masterKey)
        throws IOException, BookieException {
    long requestNanos = MathUtils.nowInNano();
    boolean success = false;
    int entrySize = 0;
    try {
        LedgerDescriptor handle = getLedgerForEntry(entry, masterKey);
        synchronized (handle) {
            if (handle.isFenced()) {
                throw BookieException.create(BookieException.Code.LedgerFencedException);
            }
            entrySize = entry.remaining();
            addEntryInternal(handle, entry, cb, ctx);
        }
        success = true;
    } catch (NoWritableLedgerDirException e) {
        transitionToReadOnlyMode();
        throw new IOException(e);
    } finally {
        long elapsedNanos = MathUtils.elapsedNanos(requestNanos);
        if (success) {
            addEntryStats.registerSuccessfulEvent(elapsedNanos, TimeUnit.NANOSECONDS);
            addBytesStats.registerSuccessfulValue(entrySize);
        } else {
            addEntryStats.registerFailedEvent(elapsedNanos, TimeUnit.NANOSECONDS);
            addBytesStats.registerFailedValue(entrySize);
        }
    }
}

From source file: com.robonobo.eon.SEONConnection.java

private void fetchMoreData() {
    ByteBuffer buf = dataProvider.getMoreData();
    if (buf == null) {
        if (debugLogging)
            log.debug(this + " fetching more data: returned null");
    } else {
        if (debugLogging)
            log.debug(this + " fetching more data: returned " + buf.remaining() + " bytes");
        outgoing.addBuffer(buf);
    }
}

From source file: byps.http.HWriteResponseHelper.java

public void writeResponse(ByteBuffer obuf, Throwable e, HttpServletResponse resp, boolean isAsync)
        throws IOException {
    if (log.isDebugEnabled())
        log.debug("writeResponse(" + obuf + ", exception=" + e + ", resp=" + resp);

    if (resp == null) {
        if (log.isDebugEnabled())
            log.debug(")writeResponse timeout");
        return; // timeout
    }

    if (listener != null) {
        if (log.isDebugEnabled())
            log.debug("call onBefore-listener");
        if (listener.onBeforeWriteHttpResponse(obuf, e, resp, isAsync)) {
            if (log.isDebugEnabled())
                log.debug(")writeResponse, onBefore-listener has written the response.");
        }
    }

    if (e != null) {

        int status = HttpServletResponse.SC_INTERNAL_SERVER_ERROR;
        if (e instanceof BException) {
            BException bex = (BException) e;
            if (bex.code == BExceptionC.CANCELLED) {
                status = HttpServletResponse.SC_NOT_ACCEPTABLE;
            } else if (bex.code == BExceptionC.RESEND_LONG_POLL) {
                status = HttpServletResponse.SC_NO_CONTENT;
            }
        }

        if (status == HttpServletResponse.SC_INTERNAL_SERVER_ERROR) {
            log.warn("Responding server error.", e);
        }

        resp.setStatus(status);

        PrintWriter wr = resp.getWriter();
        String errmsg = e.toString(); // (e instanceof BException) ?
                                      // ((BException)e).msg : e.toString();
        wr.print(errmsg);
        wr.close();

    } else {

        if (log.isDebugEnabled())
            log.debug("copy to servlet output");
        boolean isJson = BMessageHeader.detectProtocol(obuf) == BMessageHeader.MAGIC_JSON;
        resp.setContentType(isJson ? "application/json; charset=UTF-8" : "application/byps");
        resp.setContentLength(obuf.remaining());
        OutputStream os = resp.getOutputStream();

        if (log.isDebugEnabled()) {
            log.debug("buffer: \r\n" + BBuffer.toDetailString(obuf));
        }

        if (isAsync) {

            // Tomcat does not throw an IOException in asynchronous requests, if the
            // client
            // has closed the socket. Somewhere on stackoverflow.com I found a hack
            // to work around this bug. The workaround splits the message into two
            // parts and calls flush() after each part. The second flush throws the
            // expected exception. But the author of this workaround mentioned that
            // it does not work in all cases, and I can confirm that.
            // http://stackoverflow.com/questions/7124508/how-to-properly-detect-a-client-disconnect-in-servlet-spec-3
            int pos = obuf.position(), len = obuf.remaining() / 2;
            os.write(obuf.array(), pos, len);
            os.flush();
            os.write(obuf.array(), pos + len, obuf.remaining() - len);
            os.flush();
        } else {
            os.write(obuf.array(), obuf.position(), obuf.remaining());
        }

        os.close();

        if (listener != null) {
            if (log.isDebugEnabled())
                log.debug("call onAfter-listener");
            listener.onAfterWriteHttpResponse(obuf.remaining());
        }

    }
    if (log.isDebugEnabled())
        log.debug(")writeResponse");
}

From source file: edu.hawaii.soest.kilonalu.flntu.FLNTUSource.java

/**
 * A method that executes the streaming of data from the source to the RBNB
 * server after all configuration of settings, connections to hosts, and
 * thread initializing occurs.  This method contains the detailed code for
 * streaming the data and interpreting the stream.
 */
protected boolean execute() {
    logger.debug("FLNTUSource.execute() called.");
    // do not execute the stream if there is no connection
    if (!isConnected())
        return false;

    boolean failed = false;

    SocketChannel socket = getSocketConnection();

    // while data are being sent, read them into the buffer
    try {
        // create four byte placeholders used to evaluate up to a four-byte 
        // window.  The FIFO layout looks like:
        //           -------------------------
        //   in ---> | One | Two |Three|Four |  ---> out
        //           -------------------------
        byte byteOne = 0x00, // set initial placeholder values
                byteTwo = 0x00, byteThree = 0x00, byteFour = 0x00;

        // Create a buffer that will store the sample bytes as they are read
        ByteBuffer sampleBuffer = ByteBuffer.allocate(getBufferSize());

        // create a byte buffer to store bytes from the TCP stream
        ByteBuffer buffer = ByteBuffer.allocateDirect(getBufferSize());

        // add a channel of data that will be pushed to the server.  
        // Each sample will be sent to the Data Turbine as an rbnb frame.
        ChannelMap rbnbChannelMap = new ChannelMap();

        // while there are bytes to read from the socket ...
        while (socket.read(buffer) != -1 || buffer.position() > 0) {

            // prepare the buffer for reading
            buffer.flip();

            // while there are unread bytes in the ByteBuffer
            while (buffer.hasRemaining()) {
                byteOne = buffer.get();
                logger.debug("char: " + (char) byteOne + "\t" + "b1: "
                        + new String(Hex.encodeHex((new byte[] { byteOne }))) + "\t" + "b2: "
                        + new String(Hex.encodeHex((new byte[] { byteTwo }))) + "\t" + "b3: "
                        + new String(Hex.encodeHex((new byte[] { byteThree }))) + "\t" + "b4: "
                        + new String(Hex.encodeHex((new byte[] { byteFour }))) + "\t" + "sample pos: "
                        + sampleBuffer.position() + "\t" + "sample rem: " + sampleBuffer.remaining() + "\t"
                        + "sample cnt: " + sampleByteCount + "\t" + "buffer pos: " + buffer.position() + "\t"
                        + "buffer rem: " + buffer.remaining() + "\t" + "state: " + state);

                // Use a State Machine to process the byte stream.
                switch (state) {

                case 0:

                    // sample sets begin with 'mvs 1\r\n' and end with 'mvs 0\r\n'.  Find the 
                    // beginning of the sample set using the 4-byte window (s 1\r\n)
                    // note bytes are in reverse order in the FIFO window
                    if (byteOne == 0x0A && byteTwo == 0x0D && byteThree == 0x31 && byteFour == 0x20) {
                        // we've found the beginning of a sample set, move on
                        state = 1;
                        break;

                    } else {
                        break;
                    }

                case 1: // read the rest of the bytes to the next EOL characters

                    // sample line is terminated by record delimiter byte (\r\n)
                    // note bytes are in reverse order in the FIFO window
                    if (byteOne == 0x0A && byteTwo == 0x0D && byteThree == 0x30 && byteFour == 0x20) {

                        // we've found the sample set ending, clear buffers and return
                        // to state 0 to wait for the next set
                        byteOne = 0x00;
                        byteTwo = 0x00;
                        byteThree = 0x00;
                        byteFour = 0x00;
                        sampleBuffer.clear();
                        sampleByteCount = 0;
                        rbnbChannelMap.Clear();
                        logger.debug("Cleared b1,b2,b3,b4. Cleared sampleBuffer. Cleared rbnbChannelMap.");
                        state = 0;

                        // if we're not at the sample set end, look for individual samples    
                    } else if (byteOne == 0x0A && byteTwo == 0x0D) {

                        // found the sample ending delimiter
                        // add in the sample delimiter to the sample buffer
                        if (sampleBuffer.remaining() > 0) {
                            sampleBuffer.put(byteOne);
                            sampleByteCount++;
                        } else {
                            sampleBuffer.compact();
                            logger.debug("Compacting sampleBuffer ...");
                            sampleBuffer.put(byteOne);
                            sampleByteCount++;

                        }

                        // extract just the length of the sample bytes out of the
                        // sample buffer, and place it in the channel map as a 
                        // byte array.  Then, send it to the data turbine.
                        byte[] sampleArray = new byte[sampleByteCount];
                        sampleBuffer.flip();
                        sampleBuffer.get(sampleArray);

                        // send the sample to the data turbine
                        rbnbChannelMap.PutTimeAuto("server");
                        String sampleString = new String(sampleArray, "US-ASCII");
                        int channelIndex = rbnbChannelMap.Add(getRBNBChannelName());
                        rbnbChannelMap.PutMime(channelIndex, "text/plain");
                        rbnbChannelMap.PutDataAsString(channelIndex, sampleString);
                        getSource().Flush(rbnbChannelMap);
                        logger.info("Sample: " + sampleString.substring(0, sampleString.length() - 2)
                                + " sent data to the DataTurbine. ");
                        byteOne = 0x00;
                        byteTwo = 0x00;
                        byteThree = 0x00;
                        byteFour = 0x00;
                        sampleBuffer.clear();
                        sampleByteCount = 0;
                        rbnbChannelMap.Clear();
                        logger.debug("Cleared b1,b2,b3,b4. Cleared sampleBuffer. Cleared rbnbChannelMap.");
                        break;

                    } else { // not 0x0

                        // still in the middle of the sample, keep adding bytes
                        sampleByteCount++; // add each byte found

                        if (sampleBuffer.remaining() > 0) {
                            sampleBuffer.put(byteOne);
                        } else {
                            sampleBuffer.compact();
                            logger.debug("Compacting sampleBuffer ...");
                            sampleBuffer.put(byteOne);

                        }

                        break;
                    } // end if for 0x0D20 EOL

                } // end switch statement

                // shift the bytes in the FIFO window
                byteFour = byteThree;
                byteThree = byteTwo;
                byteTwo = byteOne;

            } //end while (more unread bytes)

            // prepare the buffer to read in more bytes from the stream
            buffer.compact();

        } // end while (more socket bytes to read)
        socket.close();

    } catch (IOException e) {
        // handle exceptions
        // In the event of an i/o exception, log the exception, and allow execute()
        // to return false, which will prompt a retry.
        failed = true;
        e.printStackTrace();
        return !failed;
    } catch (SAPIException sapie) {
        // In the event of an RBNB communication  exception, log the exception, 
        // and allow execute() to return false, which will prompt a retry.
        failed = true;
        sapie.printStackTrace();
        return !failed;
    }

    return !failed;
}

From source file: com.healthmarketscience.jackcess.Table.java

/**
 * Writes a new table defined by the given TableCreator to the database.
 * @usage _advanced_method_
 */
protected static void writeTableDefinition(TableCreator creator) throws IOException {
    // first, create the usage map page
    createUsageMapDefinitionBuffer(creator);

    // next, determine how big the table def will be (in case it will be more
    // than one page)
    JetFormat format = creator.getFormat();
    int idxDataLen = (creator.getIndexCount() * (format.SIZE_INDEX_DEFINITION + format.SIZE_INDEX_COLUMN_BLOCK))
            + (creator.getLogicalIndexCount() * format.SIZE_INDEX_INFO_BLOCK);
    int totalTableDefSize = format.SIZE_TDEF_HEADER
            + (format.SIZE_COLUMN_DEF_BLOCK * creator.getColumns().size()) + idxDataLen
            + format.SIZE_TDEF_TRAILER;

    // total up the amount of space used by the column and index names (2
    // bytes per char + 2 bytes for the length)
    for (Column col : creator.getColumns()) {
        int nameByteLen = (col.getName().length() * JetFormat.TEXT_FIELD_UNIT_SIZE);
        totalTableDefSize += nameByteLen + 2;
    }

    for (IndexBuilder idx : creator.getIndexes()) {
        int nameByteLen = (idx.getName().length() * JetFormat.TEXT_FIELD_UNIT_SIZE);
        totalTableDefSize += nameByteLen + 2;
    }

    // now, create the table definition
    PageChannel pageChannel = creator.getPageChannel();
    ByteBuffer buffer = pageChannel.createBuffer(Math.max(totalTableDefSize, format.PAGE_SIZE));
    writeTableDefinitionHeader(creator, buffer, totalTableDefSize);

    if (creator.hasIndexes()) {
        // index row counts
        IndexData.writeRowCountDefinitions(creator, buffer);
    }

    // column definitions
    Column.writeDefinitions(creator, buffer);

    if (creator.hasIndexes()) {
        // index and index data definitions
        IndexData.writeDefinitions(creator, buffer);
        Index.writeDefinitions(creator, buffer);
    }

    //End of tabledef
    buffer.put((byte) 0xff);
    buffer.put((byte) 0xff);

    // write table buffer to database
    if (totalTableDefSize <= format.PAGE_SIZE) {

        // easy case, fits on one page
        buffer.putShort(format.OFFSET_FREE_SPACE, (short) (buffer.remaining() - 8)); // overwrite page free space
        // Write the tdef page to disk.
        pageChannel.writePage(buffer, creator.getTdefPageNumber());

    } else {

        // need to split across multiple pages
        ByteBuffer partialTdef = pageChannel.createPageBuffer();
        buffer.rewind();
        int nextTdefPageNumber = PageChannel.INVALID_PAGE_NUMBER;
        while (buffer.hasRemaining()) {

            // reset for next write
            partialTdef.clear();

            if (nextTdefPageNumber == PageChannel.INVALID_PAGE_NUMBER) {

                // this is the first page.  note, the first page already has the
                // page header, so no need to write it here
                nextTdefPageNumber = creator.getTdefPageNumber();

            } else {

                // write page header
                writeTablePageHeader(partialTdef);
            }

            // copy the next page of tdef bytes
            int curTdefPageNumber = nextTdefPageNumber;
            int writeLen = Math.min(partialTdef.remaining(), buffer.remaining());
            partialTdef.put(buffer.array(), buffer.position(), writeLen);
            ByteUtil.forward(buffer, writeLen);

            if (buffer.hasRemaining()) {
                // need a next page
                nextTdefPageNumber = pageChannel.allocateNewPage();
                partialTdef.putInt(format.OFFSET_NEXT_TABLE_DEF_PAGE, nextTdefPageNumber);
            }

            // update page free space
            partialTdef.putShort(format.OFFSET_FREE_SPACE, (short) (partialTdef.remaining() - 8)); // overwrite page free space

            // write partial page to disk
            pageChannel.writePage(partialTdef, curTdefPageNumber);
        }

    }
}