Example usage for java.nio ByteBuffer limit

Introduction

On this page you can find example usage of the java.nio.ByteBuffer.limit() method, drawn from open-source projects.

Prototype

public final int limit() 

Document

Returns the limit of this buffer.
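
A minimal, self-contained sketch of what limit() reports may help before the excerpts: a fresh buffer's limit equals its capacity, and after flip() the limit marks the end of the readable data.

import java.nio.ByteBuffer;

public class LimitDemo {
    public static void main(String[] args) {
        ByteBuffer buf = ByteBuffer.allocate(16);
        System.out.println(buf.limit());       // 16: fresh buffer, limit == capacity

        buf.put(new byte[] { 1, 2, 3, 4, 5 }); // position advances to 5
        buf.flip();                            // limit = old position, position = 0
        System.out.println(buf.limit());       // 5: bytes available to read

        byte[] out = new byte[buf.limit()];    // sizing idiom seen in the excerpts below
        buf.get(out);
    }
}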

Usage

From source file:edu.uci.ics.crawler4j.crawler.fetcher.PageFetcher.java

private boolean loadPage(final Page p, final InputStream in, final int totalsize, final boolean isBinary,
        String encoding) {
    ByteBuffer bBuf;

    if (totalsize > 0) {
        bBuf = ByteBuffer.allocate(totalsize + 1024);
    } else {
        bBuf = ByteBuffer.allocate(maxDownloadSize);
    }
    final byte[] b = new byte[1024];
    int len;
    double finished = 0;
    try {
        while ((len = in.read(b)) != -1) {
            // Conservative check: stop before another full chunk could overflow the buffer.
            if (finished + b.length > bBuf.capacity()) {
                break;
            }
            bBuf.put(b, 0, len);
            finished += len;
        }
    } catch (final BufferOverflowException boe) {
        System.out.println("Page size exceeds maximum allowed.");
        return false;
    } catch (final Exception e) {
        System.err.println(e.getMessage());
        return false;
    }

    bBuf.flip();
    if (isBinary) {
        byte[] tmp = new byte[bBuf.limit()];
        bBuf.get(tmp);
        p.setBinaryData(tmp);
    } else {
        String html = "";
        if (encoding == null) {
            int pos = bBuf.position();
            html = Charset.forName("US-ASCII").decode(bBuf).toString();
            bBuf.position(pos);
            pos = html.toLowerCase().indexOf("<meta http-equiv=\"content-type\" content=\"");
            if (pos >= 0) {
                int end = html.indexOf("\"", pos + 41);
                if (end >= 0) {
                    String content = html.substring(pos, end);
                    if (content.contains("charset=")) {
                        encoding = content.substring(content.indexOf("charset=") + 8);
                    }
                }
            }
        }
        if (encoding == null || !Charset.isSupported(encoding))
            encoding = "UTF-8";

        if (!encoding.equals("UTF-8")) {
            html = Charset.forName(encoding).decode(bBuf).toString();
        }

        if (html.length() == 0) {
            return false;
        }
        p.setHTML(html);
    }
    return true;
}
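
The flip()/limit() pairing above is the standard drain idiom: flip() sets the limit to the number of bytes just written, so limit() sizes the output array exactly. A stripped-down sketch of just that idiom:

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

public class DrainDemo {
    public static void main(String[] args) {
        ByteBuffer bBuf = ByteBuffer.allocate(8192);
        bBuf.put("hello".getBytes(StandardCharsets.US_ASCII));
        bBuf.flip(); // limit = 5, position = 0

        byte[] tmp = new byte[bBuf.limit()]; // exactly the bytes that were written
        bBuf.get(tmp);
        System.out.println(new String(tmp, StandardCharsets.US_ASCII)); // hello
    }
}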

From source file:com.slytechs.capture.file.editor.AbstractRawIterator.java

/**
 * Adds a new record using two buffers. This method is more efficient than
 * the {@link #addAll(ByteBuffer[])} version, as the two buffers are passed
 * as normal parameters. This signature is used when a record's header and
 * content reside in two separate buffers.
 *
 * @param b1
 *          first buffer containing the record's header
 * @param b2
 *          second buffer containing the record's content
 * @throws IOException
 *           any IO errors
 */
public void add(final ByteBuffer b1, final ByteBuffer b2) throws IOException {
    final long length = (b1.limit() - b1.position()) + (b2.limit() - b2.position());

    // Create a partial loader for our cache memory buffer and do the insert
    final PartialLoader record = new MemoryCacheLoader(b1, b2, headerReader);
    this.edits.insert(this.global, length, record);

    // Advance past the record we just added
    this.setPosition(this.global + length);

    this.autoflush.autoflushChange(length);
}
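
The length computed above, limit() - position() per buffer, is exactly what ByteBuffer.remaining() returns; a tiny sketch of the equivalence:

import java.nio.ByteBuffer;

public class RemainingDemo {
    public static void main(String[] args) {
        ByteBuffer b1 = ByteBuffer.allocate(32);
        b1.put(new byte[10]).flip();
        b1.position(2); // e.g. skip a 2-byte prefix

        // limit() - position() == remaining()
        System.out.println(b1.limit() - b1.position()); // 8
        System.out.println(b1.remaining());             // 8
    }
}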

From source file:nextflow.fs.dx.DxUploadOutputStream.java

@SuppressWarnings("unchecked")
private void consumeBuffer(final ByteBuffer buffer, final int chunkIndex) throws IOException {
    log.debug("File: {} > uploading chunk: {}", fileId, chunkIndex);

    // request to upload a new chunk
    // note: dnanexus upload chunk index is 1-based
    Map<String, Object> upload = remote.fileUpload(fileId, chunkIndex);
    log.trace("File: {} > chunk [{}] > FileUpload: {}", fileId, chunkIndex, upload);

    // the response provides the URL to POST the chunk to and the
    // 'Authorization' header value
    String url = (String) upload.get("url");
    Map<String, Object> headers = (Map<String, Object>) upload.get("headers");
    String auth = (String) headers.get("Authorization");

    // create a POST request to upload the chunk
    HttpPost post = new HttpPost(url);
    post.setHeader("Authorization", auth);

    log.trace("File: {} > chunk [{}] > buffer limit: {}; remaining: {}", fileId, chunkIndex, buffer.limit(),
            buffer.remaining());

    HttpEntity payload = new InputStreamEntity(new ByteBufferBackedInputStream(buffer), buffer.limit());
    post.setEntity(payload);

    HttpEntity entity = DxHttpClient.getInstance().http().execute(post).getEntity();
    String response = EntityUtils.toString(entity, "UTF-8");
    log.trace("File: {} > chunk [{}] > post response: {}", fileId, chunkIndex, response);

    // put the 'buffer' in the pool, so that it can be recycled
    bufferPool.offer(buffer);

    log.trace("File: {} > completed upload chunk: ", fileId, chunkIndex);
}
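
Note that buffer.limit() equals the number of readable bytes only because the buffer arrives flipped (position 0); otherwise remaining() would be the safer length argument. The ByteBufferBackedInputStream above comes from an external library; a minimal stand-in (the class here is hypothetical, not the library's) might look like this:

import java.io.InputStream;
import java.nio.ByteBuffer;

public class ByteBufferInputStream extends InputStream {
    private final ByteBuffer buf;

    public ByteBufferInputStream(ByteBuffer buf) {
        this.buf = buf;
    }

    @Override
    public int read() {
        // Serve bytes from the buffer until its limit is reached.
        return buf.hasRemaining() ? (buf.get() & 0xFF) : -1;
    }

    @Override
    public int read(byte[] dst, int off, int len) {
        if (!buf.hasRemaining()) {
            return -1;
        }
        int n = Math.min(len, buf.remaining());
        buf.get(dst, off, n);
        return n;
    }
}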

From source file:org.apache.storm.daemon.logviewer.handler.LogviewerLogSearchHandler.java

/**
 * As the file is read into a buffer, 1/2 the buffer's size at a time, we search the buffer for matches of the
 * substring and return a list of zero or more matches.
 */
private SubstringSearchResult bufferSubstringSearch(boolean isDaemon, File file, int fileLength,
        int offsetToBuf, int initBufOffset, BufferedInputStream stream, Integer bytesSkipped, int bytesRead,
        ByteBuffer haystack, byte[] needle, List<Map<String, Object>> initialMatches, Integer numMatches,
        byte[] beforeBytes) throws IOException {
    int bufOffset = initBufOffset;
    List<Map<String, Object>> matches = initialMatches;

    byte[] newBeforeBytes;
    Integer newByteOffset;

    while (true) {
        int offset = offsetOfBytes(haystack.array(), needle, bufOffset);
        if (matches.size() < numMatches && offset >= 0) {
            final int fileOffset = offsetToBuf + offset;
            final int bytesNeededAfterMatch = haystack.limit() - GREP_CONTEXT_SIZE - needle.length;

            byte[] beforeArg = null;
            byte[] afterArg = null;
            if (offset < GREP_CONTEXT_SIZE) {
                beforeArg = beforeBytes;
            }

            if (offset > bytesNeededAfterMatch) {
                afterArg = tryReadAhead(stream, haystack, offset, fileLength, bytesRead);
            }

            bufOffset = offset + needle.length;
            matches.add(mkMatchData(needle, haystack, offset, fileOffset, file.getCanonicalFile().toPath(),
                    isDaemon, beforeArg, afterArg));
        } else {
            int beforeStrToOffset = Math.min(haystack.limit(), GREP_MAX_SEARCH_SIZE);
            int beforeStrFromOffset = Math.max(0, beforeStrToOffset - GREP_CONTEXT_SIZE);
            newBeforeBytes = Arrays.copyOfRange(haystack.array(), beforeStrFromOffset, beforeStrToOffset);

            // It's OK if new-byte-offset is negative.
            // This is normal if we are out of bytes to read from a small file.
            if (matches.size() >= numMatches) {
                newByteOffset = ((Number) last(matches).get("byteOffset")).intValue() + needle.length;
            } else {
                newByteOffset = bytesSkipped + bytesRead - GREP_MAX_SEARCH_SIZE;
            }

            break;
        }
    }

    return new SubstringSearchResult(matches, newByteOffset, newBeforeBytes);
}
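
A distinction the search above relies on: haystack.array() exposes the entire backing array, while limit() bounds the bytes actually read into it, which is why the context copy stops at limit() rather than at the array length. A small sketch, assuming a partially filled heap buffer:

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

public class PartialFillDemo {
    public static void main(String[] args) {
        ByteBuffer haystack = ByteBuffer.allocate(4096);
        // Suppose a read filled only part of the buffer:
        haystack.put("needle in a haystack".getBytes(StandardCharsets.US_ASCII));
        haystack.flip();

        // array() exposes the whole backing array; limit() bounds the bytes
        // actually read, so searches and copies must stop at limit().
        System.out.println(haystack.array().length); // 4096 (capacity)
        System.out.println(haystack.limit());        // 20 (valid bytes)
    }
}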

From source file:com.healthmarketscience.jackcess.UsageMap.java

/**
 * @param database database that contains this usage map
 * @param tableBuffer Buffer that contains this map's declaration
 * @param pageNum Page number that this usage map is contained in
 * @param rowStart Offset at which the declaration starts in the buffer
 */
private UsageMap(Database database, ByteBuffer tableBuffer, int pageNum, short rowStart) throws IOException {
    _database = database;
    _tableBuffer = tableBuffer;
    _tablePageNum = pageNum;
    _rowStart = rowStart;
    _tableBuffer.position(_rowStart + getFormat().OFFSET_USAGE_MAP_START);
    _startOffset = _tableBuffer.position();
    if (LOG.isDebugEnabled()) {
        LOG.debug("Usage map block:\n"
                + ByteUtil.toHexString(_tableBuffer, _rowStart, tableBuffer.limit() - _rowStart));
    }
}
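
Here tableBuffer.limit() acts as the absolute end of the valid data, so limit() minus the row offset gives the byte count to dump. A small sketch of that arithmetic (the offsets are made up):

import java.nio.ByteBuffer;

public class TailLengthDemo {
    public static void main(String[] args) {
        ByteBuffer tableBuffer = ByteBuffer.allocate(64);
        tableBuffer.put(new byte[40]).flip(); // 40 valid bytes, limit = 40

        short rowStart = 8; // hypothetical offset of the row inside the page
        int tailLength = tableBuffer.limit() - rowStart; // 32 bytes to dump
        System.out.println(tailLength);
    }
}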

From source file:org.apache.flume.channel.file.Log.java

/**
 * Log a take of an event, pointer points at the corresponding put
 *
 * Synchronization not required as this method is atomic
 * @param transactionID
 * @param pointer
 * @throws IOException
 */
void take(long transactionID, FlumeEventPointer pointer) throws IOException {
    Preconditions.checkState(open, "Log is closed");
    Take take = new Take(transactionID, WriteOrderOracle.next(), pointer.getOffset(), pointer.getFileID());
    ByteBuffer buffer = TransactionEventRecord.toByteBuffer(take);
    int logFileIndex = nextLogWriter(transactionID);
    long usableSpace = logFiles.get(logFileIndex).getUsableSpace();
    long requiredSpace = minimumRequiredSpace + buffer.limit();
    if (usableSpace <= requiredSpace) {
        throw new IOException("Usable space exhaused, only " + usableSpace + " bytes remaining, required "
                + requiredSpace + " bytes");
    }
    boolean error = true;
    try {
        try {
            logFiles.get(logFileIndex).take(buffer);
            error = false;
        } catch (LogFileRetryableIOException e) {
            if (!open) {
                throw e;
            }
            roll(logFileIndex, buffer);
            logFiles.get(logFileIndex).take(buffer);
            error = false;
        }
    } finally {
        if (error && open) {
            roll(logFileIndex);
        }
    }
}
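
This and the next three Log excerpts share one idiom: TransactionEventRecord.toByteBuffer(...) returns a flipped buffer, so buffer.limit() is the serialized record size, which is added to a safety margin before checking disk space. A stripped-down sketch of that check (the record layout and the numbers are made up):

import java.io.IOException;
import java.nio.ByteBuffer;

public class SpaceCheckDemo {
    public static void main(String[] args) throws IOException {
        // Stand-in for toByteBuffer(...): serialize fields, then flip so
        // that limit() reports the encoded size in bytes.
        ByteBuffer buffer = ByteBuffer.allocate(64);
        buffer.putLong(42L).putLong(7L).putInt(3).flip(); // limit = 20

        long minimumRequiredSpace = 1L << 20; // hypothetical safety margin
        long usableSpace = 512L << 20;        // hypothetical free disk space
        long requiredSpace = minimumRequiredSpace + buffer.limit();
        if (usableSpace <= requiredSpace) {
            throw new IOException("Usable space exhausted, only " + usableSpace
                    + " bytes remaining, required " + requiredSpace + " bytes");
        }
        System.out.println("ok to write " + buffer.limit() + " bytes");
    }
}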

From source file:org.apache.flume.channel.file.Log.java

/**
 * Log a put of an event
 *
 * Synchronization not required as this method is atomic
 * @param transactionID
 * @param event
 * @return a FlumeEventPointer referencing the stored event
 * @throws IOException
 */
FlumeEventPointer put(long transactionID, Event event) throws IOException {
    Preconditions.checkState(open, "Log is closed");
    FlumeEvent flumeEvent = new FlumeEvent(event.getHeaders(), event.getBody());
    Put put = new Put(transactionID, WriteOrderOracle.next(), flumeEvent);
    ByteBuffer buffer = TransactionEventRecord.toByteBuffer(put);
    int logFileIndex = nextLogWriter(transactionID);
    long usableSpace = logFiles.get(logFileIndex).getUsableSpace();
    long requiredSpace = minimumRequiredSpace + buffer.limit();
    if (usableSpace <= requiredSpace) {
        throw new IOException("Usable space exhaused, only " + usableSpace + " bytes remaining, required "
                + requiredSpace + " bytes");
    }
    boolean error = true;
    try {
        try {
            FlumeEventPointer ptr = logFiles.get(logFileIndex).put(buffer);
            error = false;
            return ptr;
        } catch (LogFileRetryableIOException e) {
            if (!open) {
                throw e;
            }
            roll(logFileIndex, buffer);
            FlumeEventPointer ptr = logFiles.get(logFileIndex).put(buffer);
            error = false;
            return ptr;
        }
    } finally {
        if (error && open) {
            roll(logFileIndex);
        }
    }
}

From source file:org.apache.flume.channel.file.Log.java

/**
 * Log a rollback of a transaction
 *
 * Synchronization not required as this method is atomic
 * @param transactionID
 * @throws IOException
 */
void rollback(long transactionID) throws IOException {
    Preconditions.checkState(open, "Log is closed");

    if (LOGGER.isDebugEnabled()) {
        LOGGER.debug("Rolling back " + transactionID);
    }
    Rollback rollback = new Rollback(transactionID, WriteOrderOracle.next());
    ByteBuffer buffer = TransactionEventRecord.toByteBuffer(rollback);
    int logFileIndex = nextLogWriter(transactionID);
    long usableSpace = logFiles.get(logFileIndex).getUsableSpace();
    long requiredSpace = minimumRequiredSpace + buffer.limit();
    if (usableSpace <= requiredSpace) {
        throw new IOException("Usable space exhaused, only " + usableSpace + " bytes remaining, required "
                + requiredSpace + " bytes");
    }
    boolean error = true;
    try {
        try {
            logFiles.get(logFileIndex).rollback(buffer);
            error = false;
        } catch (LogFileRetryableIOException e) {
            if (!open) {
                throw e;
            }
            roll(logFileIndex, buffer);
            logFiles.get(logFileIndex).rollback(buffer);
            error = false;
        }
    } finally {
        if (error && open) {
            roll(logFileIndex);
        }
    }
}

From source file:org.apache.flume.channel.file.Log.java

/**
 * Synchronization not required as this method is atomic
 *
 * @param transactionID
 * @param type
 * @throws IOException
 */
private void commit(long transactionID, short type) throws IOException {
    Preconditions.checkState(open, "Log is closed");
    Commit commit = new Commit(transactionID, WriteOrderOracle.next(), type);
    ByteBuffer buffer = TransactionEventRecord.toByteBuffer(commit);
    int logFileIndex = nextLogWriter(transactionID);
    long usableSpace = logFiles.get(logFileIndex).getUsableSpace();
    long requiredSpace = minimumRequiredSpace + buffer.limit();
    if (usableSpace <= requiredSpace) {
        throw new IOException("Usable space exhaused, only " + usableSpace + " bytes remaining, required "
                + requiredSpace + " bytes");
    }
    boolean error = true;
    try {
        try {
            LogFile.Writer logFileWriter = logFiles.get(logFileIndex);
            // If multiple transactions are committing at the same time,
            // this ensures that the number of actual fsyncs is small and a
            // number of them are grouped together into one.
            logFileWriter.commit(buffer);
            logFileWriter.sync();
            error = false;
        } catch (LogFileRetryableIOException e) {
            if (!open) {
                throw e;
            }
            roll(logFileIndex, buffer);
            LogFile.Writer logFileWriter = logFiles.get(logFileIndex);
            logFileWriter.commit(buffer);
            logFileWriter.sync();
            error = false;
        }
    } finally {
        if (error && open) {
            roll(logFileIndex);
        }
    }
}

From source file:fuse.okuyamafs.OkuyamaFilesystem.java

public int read(String path, Object fh, ByteBuffer buf, long offset) throws FuseException {

    log.info("read:" + path + " offset:" + offset + " buf.limit:" + buf.limit());
    if (fh == null)
        return Errno.EBADE;
    try {

        String trimToPath = path.trim();
        synchronized (this.parallelDataAccessSync[((path.hashCode() << 1) >>> 1) % 100]) {

            // If this path has buffered, uncommitted writes, flush them before reading.
            List bufferedDataFhList = writeBufFpMap.removeGroupingData(path);
            if (bufferedDataFhList != null) {
                for (int idx = 0; idx < bufferedDataFhList.size(); idx++) {
                    Object bFh = bufferedDataFhList.get(idx);
                    this.fixNoCommitData(bFh);
                }
            }
            String pathInfoStr = (String) client.getPathDetail(trimToPath);

            String[] pathInfo = pathInfoStr.split("\t");
            long nowSize = Long.parseLong(pathInfo[4]);
            int readLen = client.readValue(trimToPath, offset, buf.limit(), pathInfo[pathInfo.length - 2], buf);

            if (readLen < 1) {
                log.info("no data read:" + path + " offset:" + offset + " buf.limit:" + buf.limit());
            }
        }
    } catch (FuseException fe) {
        throw fe;
    } catch (Exception e) {
        throw new FuseException(e);
    }
    return 0;
}
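
In the FUSE binding used here, the caller sizes buf so that its limit is the maximum number of bytes the read may return, which is why buf.limit() doubles as the requested read length. A sketch of that convention against a hypothetical in-memory source:

import java.nio.ByteBuffer;

public class FuseReadDemo {

    // Sketch of the convention the excerpt relies on: the caller sizes the
    // buffer so that limit() is the maximum number of bytes to return, and
    // the callee fills at most that many.
    static int fillFrom(byte[] source, long offset, ByteBuffer buf) {
        int len = (int) Math.min(buf.limit() - buf.position(), source.length - offset);
        if (len <= 0) {
            return -1; // nothing left to read
        }
        buf.put(source, (int) offset, len);
        return len;
    }

    public static void main(String[] args) {
        ByteBuffer buf = ByteBuffer.allocate(8); // limit == 8, the "requested" size
        byte[] data = { 1, 2, 3, 4, 5 };
        System.out.println(fillFrom(data, 0, buf)); // 5
    }
}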