Example usage for java.nio ByteBuffer limit

List of usage examples for java.nio ByteBuffer limit

Introduction

On this page you can find example usages of java.nio ByteBuffer.limit.

Prototype

public final Buffer limit(int newLimit) 

Document

Sets the limit of this buffer.
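
For orientation, here is a minimal, self-contained sketch of the documented behavior (not taken from any of the projects below). The limit bounds the readable/writable region of the buffer, subject to the invariant 0 <= position <= limit <= capacity:

import java.nio.ByteBuffer;

public class LimitDemo {
    public static void main(String[] args) {
        ByteBuffer buf = ByteBuffer.allocate(16); // position=0, limit=16, capacity=16
        buf.limit(8);                             // shrink the usable window to 8 bytes
        System.out.println(buf.remaining());      // 8
        buf.position(4);
        buf.limit(2);                             // position (4) > new limit, so position becomes 2
        System.out.println(buf.position());       // 2
        // buf.limit(32);                         // would throw IllegalArgumentException (> capacity)
    }
}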

Usage

From source file:org.apache.storm.daemon.logviewer.handler.LogviewerLogSearchHandler.java

private int rotateGrepBuffer(ByteBuffer buf, BufferedInputStream stream, int totalBytesRead, File file,
        int fileLength) throws IOException {
    byte[] bufArray = buf.array();

    // Copy the 2nd half of the buffer to the first half.
    System.arraycopy(bufArray, GREP_MAX_SEARCH_SIZE, bufArray, 0, GREP_MAX_SEARCH_SIZE);

    // Zero-out the 2nd half to prevent accidental matches.
    Arrays.fill(bufArray, GREP_MAX_SEARCH_SIZE, bufArray.length, (byte) 0);

    // Fill the 2nd half with new bytes from the stream.
    int bytesRead = stream.read(bufArray, GREP_MAX_SEARCH_SIZE,
            Math.min((int) fileLength, GREP_MAX_SEARCH_SIZE));
    buf.limit(GREP_MAX_SEARCH_SIZE + bytesRead);
    return totalBytesRead + bytesRead;
}
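
Design note: this helper keeps a sliding window over the stream. The second half of the backing array is copied down into the first half, the vacated half is zeroed and refilled, and the final buf.limit(...) call marks where valid data ends, so a short read near the end of the file does not expose the zeroed padding to the search code. The search loop at the bottom of this page only rotates while unread bytes remain in the file, which keeps the refill from running past end of stream.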

From source file:edu.hawaii.soest.kilonalu.adcp.EnsembleFixedLeader.java

/**
 *  Constructor.  This method populates the Fixed Leader fields from 
 *  the given ByteBuffer of data passed in as an argument, based on metadata 
 *  found in the EnsembleHeader.
 *
 * @param ensembleBuffer the ByteBuffer that contains the binary ensemble data
 * @param ensemble  the parent ensemble for this fixed leader
 */
public EnsembleFixedLeader(ByteBuffer ensembleBuffer, Ensemble ensemble) {

    // prepare the ensemble buffer for reading
    ensembleBuffer.flip();
    ensembleBuffer.limit(ensembleBuffer.capacity());

    // position the cursor at the correct offset given the sequential location
    // of the fixed leader in the data stream.
    int typeNumber = ensemble.getDataTypeNumber(EnsembleDataType.FIXED_LEADER);
    int offset = ensemble.getDataTypeOffset(typeNumber);
    ensembleBuffer.position(offset);

    // define the temporary arrays for passing bytes
    byte[] oneByte = new byte[1];
    byte[] twoBytes = new byte[2];

    // set all of the FixedLeader fields in the order that they are read from 
    // the byte stream
    ensembleBuffer.get(twoBytes);
    setFixedLeaderID(twoBytes);
    ensemble.addToByteSum(twoBytes);
    ensembleBuffer.get(oneByte);
    setCpuFirmwareVersion(oneByte);
    ensemble.addToByteSum(oneByte);
    ensembleBuffer.get(oneByte);
    setCpuFirmwareRevision(oneByte);
    ensemble.addToByteSum(oneByte);
    ensembleBuffer.get(twoBytes);
    setSystemConfiguration(twoBytes);
    ensemble.addToByteSum(twoBytes);
    ensembleBuffer.get(oneByte);
    setPdRealOrSimulatedFlag(oneByte);
    ensemble.addToByteSum(oneByte);
    ensembleBuffer.get(oneByte);
    setLagLength(oneByte);
    ensemble.addToByteSum(oneByte);
    ensembleBuffer.get(oneByte);
    setNumberOfBeams(oneByte);
    ensemble.addToByteSum(oneByte);
    ensembleBuffer.get(oneByte);
    setNumberOfCells(oneByte);
    ensemble.addToByteSum(oneByte);
    ensembleBuffer.get(twoBytes);
    setPingsPerEnsemble(twoBytes);
    ensemble.addToByteSum(twoBytes);
    ensembleBuffer.get(twoBytes);
    setDepthCellLength(twoBytes);
    ensemble.addToByteSum(twoBytes);
    ensembleBuffer.get(twoBytes);
    setBlankAfterTransmit(twoBytes);
    ensemble.addToByteSum(twoBytes);
    ensembleBuffer.get(oneByte);
    setProfilingMode(oneByte);
    ensemble.addToByteSum(oneByte);
    ensembleBuffer.get(oneByte);
    setLowCorrelationThreshold(oneByte);
    ensemble.addToByteSum(oneByte);
    ensembleBuffer.get(oneByte);
    setNumberOfCodeRepetitions(oneByte);
    ensemble.addToByteSum(oneByte);
    ensembleBuffer.get(oneByte);
    setPercentGoodMinimum(oneByte);
    ensemble.addToByteSum(oneByte);
    ensembleBuffer.get(twoBytes);
    setErrorVelocityThreshold(twoBytes);
    ensemble.addToByteSum(twoBytes);
    ensembleBuffer.get(oneByte);
    setPingMinutes(oneByte);
    ensemble.addToByteSum(oneByte);
    ensembleBuffer.get(oneByte);
    setPingSeconds(oneByte);
    ensemble.addToByteSum(oneByte);
    ensembleBuffer.get(oneByte);
    setPingHundredths(oneByte);
    ensemble.addToByteSum(oneByte);
    ensembleBuffer.get(oneByte);
    setCoordinateTransformParams(oneByte);
    ensemble.addToByteSum(oneByte);
    ensembleBuffer.get(twoBytes);
    setHeadingAlignment(twoBytes);
    ensemble.addToByteSum(twoBytes);
    ensembleBuffer.get(twoBytes);
    setHeadingBias(twoBytes);
    ensemble.addToByteSum(twoBytes);
    ensembleBuffer.get(oneByte);
    setSensorSource(oneByte);
    ensemble.addToByteSum(oneByte);
    ensembleBuffer.get(oneByte);
    setSensorAvailability(oneByte);
    ensemble.addToByteSum(oneByte);
    ensembleBuffer.get(twoBytes);
    setBinOneDistance(twoBytes);
    ensemble.addToByteSum(twoBytes);
    ensembleBuffer.get(twoBytes);
    setTransmitPulseLength(twoBytes);
    ensemble.addToByteSum(twoBytes);
    ensembleBuffer.get(oneByte);
    setReferenceLayerStart(oneByte);
    ensemble.addToByteSum(oneByte);
    ensembleBuffer.get(oneByte);
    setReferenceLayerEnd(oneByte);
    ensemble.addToByteSum(oneByte);
    ensembleBuffer.get(oneByte);
    setFalseTargetThreshold(oneByte);
    ensemble.addToByteSum(oneByte);
    ensembleBuffer.get(oneByte);
    setFixedLeaderSpare(oneByte);
    ensemble.addToByteSum(oneByte);
    ensembleBuffer.get(twoBytes);
    setTransmitLagDistance(twoBytes);
    ensemble.addToByteSum(twoBytes);
    byte[] boardSerialNumber = new byte[8];
    ensembleBuffer.get(boardSerialNumber); // read 8 bytes
    setCpuBoardSerialNumber(boardSerialNumber);
    ensemble.addToByteSum(boardSerialNumber);
    ensembleBuffer.get(twoBytes);
    setSystemBandwidth(twoBytes);
    ensemble.addToByteSum(twoBytes);
    ensembleBuffer.get(oneByte);
    setSystemPower(oneByte);
    ensemble.addToByteSum(oneByte);

    // the following don't get called for Workhorse ADCPs
    // TODO: test for model and add fields if necessary

    //ensembleBuffer.get(oneByte);
    //setBaseFrequencyIndex(oneByte);
    //ensemble.addToByteSum(oneByte);
    //byte[] instrumentSerialNumber = new byte[4];
    //ensembleBuffer.get(instrumentSerialNumber);  // read 4 bytes
    //setSerialNumber(instrumentSerialNumber);
    //ensemble.addToByteSum(instrumentSerialNumber);
    //ensembleBuffer.get(oneByte);
    //setBeamAngle(oneByte);
    //ensemble.addToByteSum(oneByte);

}
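
Design note: the flip() followed by limit(capacity()) at the top of the constructor leaves the buffer with position 0 and the limit at full capacity, which is the state clear() produces in a single call; the absolute offset of the fixed leader section is then selected with position(offset) before the sequential get(...) calls.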

From source file:com.p2p.peercds.common.Torrent.java

private static String hashFiles(List<File> files) throws InterruptedException, IOException {
    int threads = getHashingThreadsCount();
    ExecutorService executor = Executors.newFixedThreadPool(threads);
    ByteBuffer buffer = ByteBuffer.allocate(PIECE_LENGTH);
    List<Future<String>> results = new LinkedList<Future<String>>();
    StringBuilder hashes = new StringBuilder();

    long length = 0L;
    int pieces = 0;

    long start = System.nanoTime();
    for (File file : files) {
        logger.info("Hashing data from {} with {} threads ({} pieces)...", new Object[] { file.getName(),
                threads, (int) (Math.ceil((double) file.length() / PIECE_LENGTH)) });

        length += file.length();

        FileInputStream fis = new FileInputStream(file);
        FileChannel channel = fis.getChannel();
        int step = 10;

        try {
            while (channel.read(buffer) > 0) {
                if (buffer.remaining() == 0) {
                    buffer.clear();
                    results.add(executor.submit(new CallableChunkHasher(buffer)));
                }

                if (results.size() >= threads) {
                    pieces += accumulateHashes(hashes, results);
                }

                if (channel.position() / (double) channel.size() * 100f > step) {
                    logger.info("  ... {}% complete", step);
                    step += 10;
                }
            }
        } finally {
            channel.close();
            fis.close();
        }
    }

    // Hash the last bit, if any
    if (buffer.position() > 0) {
        buffer.limit(buffer.position());
        buffer.position(0);
        results.add(executor.submit(new CallableChunkHasher(buffer)));
    }

    pieces += accumulateHashes(hashes, results);

    // Request orderly executor shutdown and wait for hashing tasks to
    // complete.
    executor.shutdown();
    while (!executor.isTerminated()) {
        Thread.sleep(10);
    }
    long elapsed = System.nanoTime() - start;

    int expectedPieces = (int) (Math.ceil((double) length / PIECE_LENGTH));
    logger.info("Hashed {} file(s) ({} bytes) in {} pieces ({} expected) in {}ms.", new Object[] { files.size(),
            length, pieces, expectedPieces, String.format("%.1f", elapsed / 1e6), });

    return hashes.toString();
}
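
Design note: in the last-piece handling, buffer.limit(buffer.position()) followed by buffer.position(0) is exactly what buffer.flip() does: it converts a buffer that has just been filled into one ready to be drained by the hasher.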

From source file:org.apache.hadoop.hdfs.DFSInputStream.java

private synchronized ByteBuffer tryReadZeroCopy(int maxLength) throws IOException {
    // Java ByteBuffers can't be longer than 2 GB, because they use
    // 4-byte signed integers to represent capacity, etc.
    // So we can't mmap the parts of the block higher than the 2 GB offset.
    // FIXME: we could work around this with multiple memory maps.
    // See HDFS-5101.
    long blockEnd32 = Math.min(Integer.MAX_VALUE, blockEnd);
    long curPos = pos;
    long blockLeft = blockEnd32 - curPos + 1;
    if (blockLeft <= 0) {
        if (DFSClient.LOG.isDebugEnabled()) {
            DFSClient.LOG.debug("unable to perform a zero-copy read from offset " + curPos + " of " + src
                    + "; blockLeft = " + blockLeft + "; blockEnd32 = " + blockEnd32 + ", blockEnd = " + blockEnd
                    + "; maxLength = " + maxLength);
        }
        return null;
    }
    int length = Math.min((int) blockLeft, maxLength);
    long blockStartInFile = currentLocatedBlock.getStartOffset();
    long blockPos = curPos - blockStartInFile;
    long limit = blockPos + length;
    ClientMmap clientMmap = blockReader.getClientMmap(currentLocatedBlock, dfsClient.getMmapManager());
    if (clientMmap == null) {
        if (DFSClient.LOG.isDebugEnabled()) {
            DFSClient.LOG.debug("unable to perform a zero-copy read from offset " + curPos + " of " + src
                    + "; BlockReader#getClientMmap returned " + "null.");
        }
        return null;
    }
    seek(pos + length);
    ByteBuffer buffer = clientMmap.getMappedByteBuffer().asReadOnlyBuffer();
    buffer.position((int) blockPos);
    buffer.limit((int) limit);
    clientMmap.ref();
    extendedReadBuffers.put(buffer, clientMmap);
    readStatistics.addZeroCopyBytes(length);
    if (DFSClient.LOG.isDebugEnabled()) {
        DFSClient.LOG.debug("readZeroCopy read " + maxLength + " bytes from " + "offset " + curPos
                + " via the zero-copy read path.  " + "blockEnd = " + blockEnd);
    }
    return buffer;
}
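
Design note: the position/limit pair windows a region of a shared memory-mapped buffer without copying any bytes; asReadOnlyBuffer() gives the caller private position and limit values over the same backing memory. A minimal sketch of the pattern, where mapped, start, and length are placeholder names:

ByteBuffer window = mapped.asReadOnlyBuffer(); // private position/limit, shared content
window.position(start);                        // first byte of the region
window.limit(start + length);                  // one past the last byte
// window.remaining() == length; reads through 'window' stay inside the region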

From source file:com.koda.integ.hbase.storage.FileExtStorage.java

/**
 * Stores multiple objects in one transaction
 * Format of a buffer:
 * 0..3 - total size of a batch
 * 4.. - batch of blocks
 *
 * @param buf the buf
 * @return the list
 */
public List<StorageHandle> storeDataBatch(ByteBuffer buf) {
    List<StorageHandle> handles = storeDataNoReleaseLock(buf);
    if (handles == null) {

        handles = new ArrayList<StorageHandle>();

        int size = buf.getInt(0);
        buf.position(4);

        while (buf.position() < size + 4) {
            buf.limit(buf.capacity());
            StorageHandle fsh = storeData(buf);
            handles.add(fsh);
        }
    }
    return handles;

}
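
Design note: resetting buf.limit(buf.capacity()) on every iteration undoes whatever limit storeData (not shown here) presumably leaves behind after consuming a single block, so the loop can keep advancing through the batch until the position reaches the total size recorded in the first four bytes.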

From source file:com.koda.integ.hbase.blockcache.OffHeapBlockCacheOld.java

/**
 * Read external with codec.
 *
 * @param blockName the block name
 * @return the cacheable
 * @throws IOException Signals that an I/O exception has occurred.
 */
private Cacheable readExternalWithCodec(String blockName) throws IOException {
    if (overflowExtEnabled == false)
        return null;
    // Check if we have  already this block in external storage cache
    try {
        // We use 16 - byte hash for external storage cache  
        byte[] hashed = Utils.hash128(blockName);
        StorageHandle handle = (StorageHandle) extStorageCache.get(hashed);
        if (handle == null)
            return null;
        ByteBuffer buffer = extStorageCache.getLocalBufferWithAddress().getBuffer();
        SerDe serde = extStorageCache.getSerDe();
        @SuppressWarnings("unused")
        Codec codec = extStorageCache.getCompressionCodec();

        buffer.clear();

        StorageHandle newHandle = storage.getData(handle, buffer);
        if (buffer.position() > 0)
            buffer.flip();
        int size = buffer.getInt();
        if (size == 0)
            return null;
        // Skip key
        int keySize = buffer.getInt();
        buffer.position(8 + keySize);
        boolean inMemory = buffer.get() == (byte) 1;

        //buffer.position(5);
        buffer.limit(size + 4);
        Cacheable obj = (Cacheable) serde.readCompressed(buffer/*, codec*/);
        if (inMemory) {
            permGenCache.put(blockName, obj);
        } else {
            tenGenCache.put(blockName, obj);
        }
        if (newHandle.equals(handle) == false) {
            extStorageCache.put(hashed, newHandle);
        }

        return obj;

    } catch (NativeMemoryException e) {
        throw new IOException(e);
    }

}
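
Design note: buffer.limit(size + 4) bounds the deserializer to the record described by the 4-byte length prefix, so readCompressed cannot run past the end of the value even though the backing buffer appears to be a larger, reused scratch buffer.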

From source file:com.blm.orc.ReaderImpl.java

private static FileMetaInfo extractMetaInfoFromFooter(FileSystem fs, Path path, long maxFileLength)
        throws IOException {
    FSDataInputStream file = fs.open(path);

    // figure out the size of the file using the option or filesystem
    long size;
    if (maxFileLength == Long.MAX_VALUE) {
        size = fs.getFileStatus(path).getLen();
    } else {
        size = maxFileLength;
    }

    //read last bytes into buffer to get PostScript
    int readSize = (int) Math.min(size, DIRECTORY_SIZE_GUESS);
    file.seek(size - readSize);
    ByteBuffer buffer = ByteBuffer.allocate(readSize);
    file.readFully(buffer.array(), buffer.arrayOffset() + buffer.position(), buffer.remaining());

    //read the PostScript
    //get length of PostScript
    int psLen = buffer.get(readSize - 1) & 0xff;
    ensureOrcFooter(file, path, psLen, buffer);
    int psOffset = readSize - 1 - psLen;
    CodedInputStream in = CodedInputStream.newInstance(buffer.array(), buffer.arrayOffset() + psOffset, psLen);
    OrcProto.PostScript ps = OrcProto.PostScript.parseFrom(in);

    checkOrcVersion(LOG, path, ps.getVersionList());

    int footerSize = (int) ps.getFooterLength();
    int metadataSize = (int) ps.getMetadataLength();
    OrcFile.WriterVersion writerVersion;
    if (ps.hasWriterVersion()) {
        writerVersion = getWriterVersion(ps.getWriterVersion());
    } else {
        writerVersion = OrcFile.WriterVersion.ORIGINAL;
    }

    //check compression codec
    switch (ps.getCompression()) {
    case NONE:
        break;
    case ZLIB:
        break;
    case SNAPPY:
        break;
    case LZO:
        break;
    default:
        throw new IllegalArgumentException("Unknown compression");
    }

    //check if extra bytes need to be read
    int extra = Math.max(0, psLen + 1 + footerSize + metadataSize - readSize);
    if (extra > 0) {
        //more bytes need to be read, seek back to the right place and read extra bytes
        file.seek(size - readSize - extra);
        ByteBuffer extraBuf = ByteBuffer.allocate(extra + readSize);
        file.readFully(extraBuf.array(), extraBuf.arrayOffset() + extraBuf.position(), extra);
        extraBuf.position(extra);
        //append with already read bytes
        extraBuf.put(buffer);
        buffer = extraBuf;
        buffer.position(0);
        buffer.limit(footerSize + metadataSize);
    } else {
        //footer is already in the bytes in buffer, just adjust position, length
        buffer.position(psOffset - footerSize - metadataSize);
        buffer.limit(psOffset);
    }

    // remember position for later
    buffer.mark();

    file.close();

    return new FileMetaInfo(ps.getCompression().toString(), (int) ps.getCompressionBlockSize(),
            (int) ps.getMetadataLength(), buffer, ps.getVersionList(), writerVersion);
}
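
Design note: both branches end with a position/limit pair that windows exactly the metadata-plus-footer bytes inside the backing array, and buffer.mark() records the start of that window so later consumers can reset() back to it.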

From source file:edu.harvard.iq.dataverse.dataaccess.TabularSubsetGenerator.java

public Object[] subsetObjectVector(File tabfile, int column, int varcount, int casecount, int columntype,
        boolean compatmode) throws IOException {

    Object[] retVector = null;

    boolean isString = false;
    boolean isDouble = false;
    boolean isLong = false;
    boolean isFloat = false;

    //Locale loc = new Locale("en", "US");

    if (columntype == COLUMN_TYPE_STRING) {
        isString = true;
        retVector = new String[casecount];
    } else if (columntype == COLUMN_TYPE_DOUBLE) {
        isDouble = true;
        retVector = new Double[casecount];
    } else if (columntype == COLUMN_TYPE_LONG) {
        isLong = true;
        retVector = new Long[casecount];
    } else if (columntype == COLUMN_TYPE_FLOAT) {
        isFloat = true;
        retVector = new Float[casecount];
    } else {
        throw new IOException("Unsupported column type: " + columntype);
    }

    File rotatedImageFile = getRotatedImage(tabfile, varcount, casecount);
    long[] columnEndOffsets = extractColumnOffsets(rotatedImageFile, varcount, casecount);
    long columnOffset = 0;
    long columnLength = 0;

    if (column > 0) {
        columnOffset = columnEndOffsets[column - 1];
        columnLength = columnEndOffsets[column] - columnEndOffsets[column - 1];
    } else {
        columnOffset = varcount * 8;
        columnLength = columnEndOffsets[0] - varcount * 8;
    }

    FileChannel fc = (FileChannel.open(Paths.get(rotatedImageFile.getAbsolutePath()), StandardOpenOption.READ));
    fc.position(columnOffset);
    int MAX_COLUMN_BUFFER = 8192;

    ByteBuffer in = ByteBuffer.allocate(MAX_COLUMN_BUFFER);

    if (columnLength < MAX_COLUMN_BUFFER) {
        in.limit((int) (columnLength));
    }

    long bytesRead = 0;
    long bytesReadTotal = 0;
    int caseindex = 0;
    int byteoffset = 0;
    byte[] leftover = null;

    while (bytesReadTotal < columnLength) {
        bytesRead = fc.read(in);
        byte[] columnBytes = in.array();
        int bytecount = 0;

        while (bytecount < bytesRead) {
            if (columnBytes[bytecount] == '\n') {
                /*
                String token = new String(columnBytes, byteoffset, bytecount-byteoffset, "UTF8");
                        
                if (leftover != null) {
                String leftoverString = new String (leftover, "UTF8");
                token = leftoverString + token;
                leftover = null;
                }
                */
                /* 
                 * Note that the way I was doing it at first - above - 
                 * was not quite the correct way - because I was creating UTF8
                 * strings from the leftover bytes, and the bytes in the 
                 * current buffer *separately*; which means, if a multi-byte
                 * UTF8 character got split in the middle between one buffer
                 * and the next, both chunks of it would become junk 
                 * characters, on each side!
                 * The correct way of doing it, of course, is to create a
                 * merged byte buffer, and then turn it into a UTF8 string. 
                 *      -- L.A. 4.0
                 */
                String token = null;

                if (leftover == null) {
                    token = new String(columnBytes, byteoffset, bytecount - byteoffset, "UTF8");
                } else {
                    byte[] merged = new byte[leftover.length + bytecount - byteoffset];

                    System.arraycopy(leftover, 0, merged, 0, leftover.length);
                    System.arraycopy(columnBytes, byteoffset, merged, leftover.length, bytecount - byteoffset);
                    token = new String(merged, "UTF8");
                    leftover = null;
                    merged = null;
                }

                if (isString) {
                    if ("".equals(token)) {
                        // An empty string is a string missing value!
                        // An empty string in quotes is an empty string!
                        retVector[caseindex] = null;
                    } else {
                        // Strip the outer quotes:
                        token = token.replaceFirst("^\\\"", "");
                        token = token.replaceFirst("\\\"$", "");

                        // We need to restore the special characters that 
                        // are stored in tab files escaped - quotes, new lines 
                        // and tabs. Before we do that however, we need to 
                        // take care of any escaped backslashes stored in 
                        // the tab file. I.e., "foo\t" should be transformed 
                        // to "foo<TAB>"; but "foo\\t" should be transformed 
                        // to "foo\t". This way new lines and tabs that were
                        // already escaped in the original data are not 
                        // going to be transformed to unescaped tab and 
                        // new line characters!

                        String[] splitTokens = token.split(Matcher.quoteReplacement("\\\\"), -2);

                        // (note that it's important to use the 2-argument version 
                        // of String.split(), and set the limit argument to a
                        // negative value; otherwise any trailing backslashes 
                        // are lost.)

                        for (int i = 0; i < splitTokens.length; i++) {
                            splitTokens[i] = splitTokens[i].replaceAll(Matcher.quoteReplacement("\\\""), "\"");
                            splitTokens[i] = splitTokens[i].replaceAll(Matcher.quoteReplacement("\\t"), "\t");
                            splitTokens[i] = splitTokens[i].replaceAll(Matcher.quoteReplacement("\\n"), "\n");
                            splitTokens[i] = splitTokens[i].replaceAll(Matcher.quoteReplacement("\\r"), "\r");
                        }
                        // TODO: 
                        // Make (some of?) the above optional; for ex., we 
                        // do need to restore the newlines when calculating UNFs;
                        // But if we are subsetting these vectors in order to 
                        // create a new tab-delimited file, they will 
                        // actually break things! -- L.A. Jul. 28 2014

                        token = StringUtils.join(splitTokens, '\\');

                        // "compatibility mode" - a hack, to be able to produce
                        // unfs identical to those produced by the "early" 
                        // unf5 jar; will be removed in production 4.0. 
                        // -- L.A. (TODO: ...)
                        if (compatmode && !"".equals(token)) {
                            if (token.length() > 128) {
                                if ("".equals(token.trim())) {
                                    // don't ask... 
                                    token = token.substring(0, 129);
                                } else {
                                    token = token.substring(0, 128);
                                    //token = String.format(loc, "%.128s", token);
                                    token = token.trim();
                                    //dbgLog.info("formatted and trimmed: "+token);
                                }
                            } else {
                                if ("".equals(token.trim())) {
                                    // again, don't ask; 
                                    // - this replicates some bugginness 
                                    // that happens inside unf5;
                                    token = "null";
                                } else {
                                    token = token.trim();
                                }
                            }
                        }

                        retVector[caseindex] = token;
                    }
                } else if (isDouble) {
                    try {
                        // TODO: verify that NaN and +-Inf are 
                        // handled correctly here! -- L.A.
                        // Verified: new Double("nan") works correctly, 
                        // resulting in Double.NaN;
                        // Double("[+-]Inf") doesn't work however; 
                        // (the constructor appears to be expecting it
                        // to be spelled as "Infinity", "-Infinity", etc. 
                        if ("inf".equalsIgnoreCase(token) || "+inf".equalsIgnoreCase(token)) {
                            retVector[caseindex] = java.lang.Double.POSITIVE_INFINITY;
                        } else if ("-inf".equalsIgnoreCase(token)) {
                            retVector[caseindex] = java.lang.Double.NEGATIVE_INFINITY;
                        } else if (token == null || token.equals("")) {
                            // missing value:
                            retVector[caseindex] = null;
                        } else {
                            retVector[caseindex] = new Double(token);
                        }
                    } catch (NumberFormatException ex) {
                        dbgLog.warning("NumberFormatException thrown for " + token + " as Double");

                        retVector[caseindex] = null; // missing value
                        // TODO: ?
                    }
                } else if (isLong) {
                    try {
                        retVector[caseindex] = new Long(token);
                    } catch (NumberFormatException ex) {
                        retVector[caseindex] = null; // assume missing value
                    }
                } else if (isFloat) {
                    try {
                        if ("inf".equalsIgnoreCase(token) || "+inf".equalsIgnoreCase(token)) {
                            retVector[caseindex] = java.lang.Float.POSITIVE_INFINITY;
                        } else if ("-inf".equalsIgnoreCase(token)) {
                            retVector[caseindex] = java.lang.Float.NEGATIVE_INFINITY;
                        } else if (token == null || token.equals("")) {
                            // missing value:
                            retVector[caseindex] = null;
                        } else {
                            retVector[caseindex] = new Float(token);
                        }
                    } catch (NumberFormatException ex) {
                        dbgLog.warning("NumberFormatException thrown for " + token + " as Float");
                        retVector[caseindex] = null; // assume missing value (TODO: ?)
                    }
                }
                caseindex++;

                if (bytecount == bytesRead - 1) {
                    byteoffset = 0;
                } else {
                    byteoffset = bytecount + 1;
                }
            } else {
                if (bytecount == bytesRead - 1) {
                    // We've reached the end of the buffer; 
                    // This means we'll save whatever unused bytes left in 
                    // it - i.e., the bytes between the last new line 
                    // encountered and the end - in the leftover buffer. 

                    // *EXCEPT*, there may be a case of a very long String
                    // that is actually longer than MAX_COLUMN_BUFFER, in 
                    // which case it is possible that we've read through
                    // an entire buffer of bytes without finding any 
                    // new lines... in this case we may need to add this
                    // entire byte buffer to an already existing leftover 
                    // buffer!
                    if (leftover == null) {
                        leftover = new byte[(int) bytesRead - byteoffset];
                        System.arraycopy(columnBytes, byteoffset, leftover, 0, (int) bytesRead - byteoffset);
                    } else {
                        if (byteoffset != 0) {
                            throw new IOException(
                                    "Reached the end of the byte buffer, with some leftover left from the last read; yet the offset is not zero!");
                        }
                        byte[] merged = new byte[leftover.length + (int) bytesRead];

                        System.arraycopy(leftover, 0, merged, 0, leftover.length);
                        System.arraycopy(columnBytes, byteoffset, merged, leftover.length, (int) bytesRead);
                        //leftover = null;
                        leftover = merged;
                        merged = null;
                    }
                    byteoffset = 0;

                }
            }
            bytecount++;
        }

        bytesReadTotal += bytesRead;
        in.clear();
        if (columnLength - bytesReadTotal < MAX_COLUMN_BUFFER) {
            in.limit((int) (columnLength - bytesReadTotal));
        }
    }

    fc.close();

    if (caseindex != casecount) {
        throw new IOException("Faile to read " + casecount + " tokens for column " + column);
        //System.out.println("read "+caseindex+" tokens instead of expected "+casecount+".");
    }

    return retVector;
}
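
Design note: the two in.limit(...) calls cap the buffer before a read rather than after one: once fewer than MAX_COLUMN_BUFFER bytes of the column remain, shrinking the limit makes the channel read stop exactly at the column boundary instead of spilling into the next column of the rotated file.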

From source file:jext2.DataInode.java

/**
 * Read Inode data
 * @param  size    size of the data to be read
 * @param  offset  start address in data area
 * @return buffer of size size containing data.
 * @throws FileTooLarge
 * @throws IoError
 */
public ByteBuffer readData(int size, long fileOffset) throws JExt2Exception, FileTooLarge {
    /* Returning null may break things somewhere..
     * Zero length buffer breaks something in jlowfuse's c code */
    if (getSize() == 0)
        return ByteBuffer.allocateDirect(1);

    /*
     * size may be larger than the inode.size, it doesn't make sense to return
     * 4k of zeros
     */
    if (size > getSize())
        size = (int) getSize();

    ByteBuffer buf = ByteBuffer.allocateDirect(size);

    int blocksize = superblock.getBlocksize();

    long i = 0;
    long firstBlock = fileOffset / blocksize;
    long offset = fileOffset % blocksize;

    /*
     * just as size may be larger than the inode's data, the number of blocks
     * may also be.
     */
    long approxBlocks = (size / blocksize) + 1;
    long maxBlocks = this.getBlocks() / (superblock.getBlocksize() / 512);
    if (approxBlocks > maxBlocks)
        approxBlocks = maxBlocks;

    while (i < approxBlocks) {
        long start = firstBlock + i;
        long stop = firstBlock + approxBlocks;

        LinkedList<Long> b = accessData().getBlocks(start, stop);
        int blocksRead;

        /*
         * Note on the sparse file support:
         * getBlocks will return null if there is no data block for this
         * logical address. So just move the position count blocks forward.
         */

        if (b == null) { /* hole */
            blocksRead = 1;

            int unboundedLimit = buf.position() + blocksize;
            int limit = Math.min(unboundedLimit, buf.capacity());

            assert limit <= buf.capacity() : "New position, limit " + limit + " is beyond buffer's capacity, "
                    + buf;

            buf.limit(limit);
            buf.position(limit);

            assert buf.limit() == buf.position();

        } else { /* blocks */
            blocksRead = b.size();

            long pos = b.getFirst() * blocksize + offset;
            int unboundedLimit = buf.position() + blocksRead * blocksize;
            int limit = Math.min(unboundedLimit, buf.capacity());

            assert limit <= buf.capacity() : "New limit " + limit + " is beyond buffer's capacity, " + buf;

            buf.limit(limit);
            blockAccess.readToBufferUnsynchronized(pos, buf);
        }

        i += blocksRead;
        offset = 0;

        /* This should be removed soon. IllegalMonitorStateException happen
         * occasionally for unknown reasons.
         */
        try {
            accessData().getHierarchyLock().readLock().unlock();
        } catch (IllegalMonitorStateException e) {
            Logger log = Filesystem.getLogger();
            log.warning("IllegalMonitorStateException encountered in readData, inode=" + this);
            log.warning(String.format(
                    "context for exception: blocks=%s i=%d approxBlocks=%d off=%d buf=%s readlock=%s lock.readlock.holds=%s",
                    b, i, approxBlocks, fileOffset, buf, accessData().getHierarchyLock(),
                    accessData().getHierarchyLock().getReadHoldCount()));
        }

        if (buf.capacity() == buf.limit())
            break;
    }

    assert buf.position() == buf.limit() : "Buffer wasn't filled completely";
    assert buf.limit() == size : "Read buffer size does not match request size";

    if (buf.limit() > getSize())
        buf.limit((int) getSize());

    buf.rewind();
    return buf;
}
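
Design note: for a hole in a sparse file the code advances limit and position by a block without issuing a read, so that region keeps the zero fill guaranteed by allocateDirect(); the final buf.limit((int) getSize()) trims the buffer back when the request ran past the end of the inode's data.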

From source file:org.apache.storm.daemon.logviewer.handler.LogviewerLogSearchHandler.java

private Map<String, Object> substringSearch(File file, String searchString, boolean isDaemon,
        Integer numMatches, Integer startByteOffset) throws InvalidRequestException {
    try {
        if (StringUtils.isEmpty(searchString)) {
            throw new IllegalArgumentException("Precondition fails: search string should not be empty.");
        }
        if (searchString.getBytes(StandardCharsets.UTF_8).length > GREP_MAX_SEARCH_SIZE) {
            throw new IllegalArgumentException(
                    "Precondition fails: the length of search string should be less than "
                            + GREP_MAX_SEARCH_SIZE);
        }

        boolean isZipFile = file.getName().endsWith(".gz");
        try (InputStream fis = Files.newInputStream(file.toPath());
                InputStream gzippedInputStream = isZipFile ? new GZIPInputStream(fis) : fis;
                BufferedInputStream stream = new BufferedInputStream(gzippedInputStream)) {

            int fileLength;
            if (isZipFile) {
                fileLength = (int) ServerUtils.zipFileSize(file);
            } else {
                fileLength = (int) file.length();
            }

            ByteBuffer buf = ByteBuffer.allocate(GREP_BUF_SIZE);
            final byte[] bufArray = buf.array();
            final byte[] searchBytes = searchString.getBytes(StandardCharsets.UTF_8);
            numMatches = numMatches != null ? numMatches : 10;
            startByteOffset = startByteOffset != null ? startByteOffset : 0;

            // Start at the part of the log file we are interested in.
            // Allow searching when start-byte-offset == file-len so it doesn't blow up on 0-length files
            if (startByteOffset > fileLength) {
                throw new InvalidRequestException("Cannot search past the end of the file");
            }

            if (startByteOffset > 0) {
                StreamUtil.skipBytes(stream, startByteOffset);
            }

            Arrays.fill(bufArray, (byte) 0);

            int totalBytesRead = 0;
            int bytesRead = stream.read(bufArray, 0, Math.min((int) fileLength, GREP_BUF_SIZE));
            buf.limit(bytesRead);
            totalBytesRead += bytesRead;

            List<Map<String, Object>> initialMatches = new ArrayList<>();
            int initBufOffset = 0;
            int byteOffset = startByteOffset;
            byte[] beforeBytes = null;

            Map<String, Object> ret = new HashMap<>();
            while (true) {
                SubstringSearchResult searchRet = bufferSubstringSearch(isDaemon, file, fileLength, byteOffset,
                        initBufOffset, stream, startByteOffset, totalBytesRead, buf, searchBytes,
                        initialMatches, numMatches, beforeBytes);

                List<Map<String, Object>> matches = searchRet.getMatches();
                Integer newByteOffset = searchRet.getNewByteOffset();
                byte[] newBeforeBytes = searchRet.getNewBeforeBytes();

                if (matches.size() < numMatches && totalBytesRead + startByteOffset < fileLength) {
                    // The start index is positioned to find any possible
                    // occurrence search string that did not quite fit in the
                    // buffer on the previous read.
                    final int newBufOffset = Math.min(buf.limit(), GREP_MAX_SEARCH_SIZE) - searchBytes.length;

                    totalBytesRead = rotateGrepBuffer(buf, stream, totalBytesRead, file, fileLength);
                    if (totalBytesRead < 0) {
                        throw new InvalidRequestException("Cannot search past the end of the file");
                    }

                    initialMatches = matches;
                    initBufOffset = newBufOffset;
                    byteOffset = newByteOffset;
                    beforeBytes = newBeforeBytes;
                } else {
                    ret.put("isDaemon", isDaemon ? "yes" : "no");
                    Integer nextByteOffset = null;
                    if (matches.size() >= numMatches || totalBytesRead < fileLength) {
                        nextByteOffset = (Integer) last(matches).get("byteOffset") + searchBytes.length;
                        if (fileLength <= nextByteOffset) {
                            nextByteOffset = null;
                        }
                    }
                    ret.putAll(mkGrepResponse(searchBytes, startByteOffset, matches, nextByteOffset));
                    break;
                }
            }
            return ret;
        }
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
}
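
Design note: buf.limit(bytesRead) after the initial read marks the end of valid data in the buffer, and rotateGrepBuffer (shown at the top of this page) recomputes the limit the same way after each refill, so the search should never treat the zeroed tail of the array as file content.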