Example usage for java.nio ByteBuffer clear

List of usage examples for java.nio ByteBuffer clear

Introduction

On this page you can find example usage for java.nio ByteBuffer clear.

Prototype

public final Buffer clear() 

Document

Clears this buffer. The position is set to zero, the limit is set to the capacity, and the mark is discarded.
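
Before the usage examples, here is a minimal self-contained sketch of what clear() does (the class name and buffer size are arbitrary choices for illustration): it resets the position to zero and the limit to the capacity so the buffer can be filled again, but it does not erase the bytes already in the buffer.

import java.nio.ByteBuffer;

public class ClearExample {
    public static void main(String[] args) {
        ByteBuffer buffer = ByteBuffer.allocate(16);

        // Write three bytes; position advances to 3, limit stays at 16.
        buffer.put((byte) 1).put((byte) 2).put((byte) 3);

        // flip() prepares the buffer for reading: limit = position, position = 0.
        buffer.flip();
        while (buffer.hasRemaining()) {
            System.out.println(buffer.get());
        }

        // clear() makes the buffer ready to be filled again: position = 0,
        // limit = capacity, mark discarded.  The old bytes are not erased.
        buffer.clear();
        System.out.println(buffer.position() + "/" + buffer.limit()); // prints 0/16
    }
}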

Usage

From source file:com.android.camera.one.v2.OneCameraZslImpl.java

/**
 * Given an image reader, extracts the JPEG image bytes and then closes the
 * reader.
 *
 * @param img the image from which to extract jpeg bytes or compress to
 *            jpeg.
 * @param degrees the angle to rotate the image clockwise, in degrees. Rotation is
 *            only applied to YUV images.
 * @return The bytes of the JPEG image. Newly allocated.
 */
private byte[] acquireJpegBytes(Image img, int degrees) {
    ByteBuffer buffer;

    if (img.getFormat() == ImageFormat.JPEG) {
        Image.Plane plane0 = img.getPlanes()[0];
        buffer = plane0.getBuffer();

        byte[] imageBytes = new byte[buffer.remaining()];
        buffer.get(imageBytes);
        buffer.rewind();
        return imageBytes;
    } else if (img.getFormat() == ImageFormat.YUV_420_888) {
        buffer = mJpegByteBufferPool.acquire();
        if (buffer == null) {
            buffer = ByteBuffer.allocateDirect(img.getWidth() * img.getHeight() * 3);
        }

        int numBytes = JpegUtilNative.compressJpegFromYUV420Image(new AndroidImageProxy(img), buffer,
                JPEG_QUALITY, degrees);

        if (numBytes < 0) {
            throw new RuntimeException("Error compressing jpeg.");
        }

        buffer.limit(numBytes);

        byte[] imageBytes = new byte[buffer.remaining()];
        buffer.get(imageBytes);

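        // Reset position and limit so the buffer is clean before returning it to the pool.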
        buffer.clear();
        mJpegByteBufferPool.release(buffer);

        return imageBytes;
    } else {
        throw new RuntimeException("Unsupported image format.");
    }
}

From source file:edu.hawaii.soest.kilonalu.adam.AdamDispatcher.java

/**
 * A method that executes the streaming of data from the source to the RBNB
 * server after all configuration of settings, connections to hosts, and
 * thread initializing occurs.  This method contains the detailed code for 
 * streaming the data and interpreting the stream.
 */
protected boolean execute() {
    logger.info("AdamDispatcher.execute() called.");
    // do not execute the stream if there is no connection
    if (!isConnected())
        return false;

    boolean failed = false;

    // while data are being sent, read them into the buffer
    try {

        // Create a buffer that will store the sample bytes as they are read
        byte[] bufferArray = new byte[getBufferSize()];

        // and a ByteBuffer used to transfer the bytes to the parser
        ByteBuffer sampleBuffer = ByteBuffer.allocate(getBufferSize());

        this.datagramPacket = new DatagramPacket(bufferArray, bufferArray.length);

        // while there are bytes to read from the socket ...
        while (!failed) {

            // receive any incoming UDP packets and parse the data payload
            datagramSocket.receive(this.datagramPacket);

            logger.debug("Host: " + datagramPacket.getAddress() + " data: "
                    + new String(Hex.encodeHex(datagramPacket.getData())));

            // the address seems to be returned with a leading slash (/). Trim it.
            String datagramAddress = datagramPacket.getAddress().toString().replaceAll("/", "");

            sampleBuffer.put(datagramPacket.getData());

            // Given the IP address of the source UDP packet and the data ByteBuffer,
            // find the correct source in the sourceMap hash and process the data
            if (sourceMap.get(datagramAddress) != null) {

                AdamSource source = sourceMap.get(datagramAddress);

                // process the data using the AdamSource driver
                source.process(datagramAddress, this.xmlConfiguration, sampleBuffer);

            } else {
                logger.debug("There is no configuration information for " + "the ADAM module at "
                        + datagramAddress + ". Please add the configuration to the "
                        + "sensor.properties.xml configuration file.");
            }

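            // Reset the sample buffer so the next datagram payload is written from position zero.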
            sampleBuffer.clear();

        } // end while (more socket bytes to read)

        disconnect();
        //      
    } catch (IOException e) {
        // handle exceptions
        // In the event of an i/o exception, log the exception, and allow execute()
        // to return false, which will prompt a retry.
        failed = true;
        e.printStackTrace();
        return !failed;

    }

    return !failed;
}

From source file:org.alfresco.contentstore.ChecksumTest.java

@Test
public void test11() throws IOException {
    try (InputStream in = getClass().getClassLoader().getResourceAsStream("marbles-uncompressed.tif");
            InputStream in1 = getClass().getClassLoader().getResourceAsStream("marbles-uncompressed1.tif")) {
        ByteBuffer buf1 = ByteBuffer.allocate(8192);
        ByteBuffer buf2 = ByteBuffer.allocate(8192);
        ReadableByteChannel channel1 = Channels.newChannel(in);
        ReadableByteChannel channel2 = Channels.newChannel(in1);
        int numRead1 = -1;
        int numRead2 = -1;
        int total = 0;
        int same = 0;
        int i = 0;
        do {
            total = 0;
            same = 0;

            numRead1 = channel1.read(buf1);
            numRead2 = channel2.read(buf2);
            i += 8192;
            //                buf1.clear();
            //                buf2.clear();
            //
            //                numRead1 = channel1.read(buf1);
            //                numRead2 = channel2.read(buf2);

            buf1.flip();
            buf2.flip();

            if (numRead1 > 0 && numRead2 > 0) {
                if (numRead1 <= numRead2) {
                    while (buf1.hasRemaining()) {
                        total++;
                        byte b1 = buf1.get();
                        byte b2 = buf2.get();
                        if (b1 == b2) {
                            same++;
                        }
                    }
                } else {
                    while (buf2.hasRemaining()) {
                        total++;
                        byte b1 = buf1.get();
                        byte b2 = buf2.get();
                        if (b1 == b2) {
                            same++;
                        }
                    }
                }
            }

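            // Reset both buffers so the next loop iteration reads fresh data from the channels.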
            buf1.clear();
            buf2.clear();
        } while (numRead1 > 0 && numRead2 > 0 && same < total);
        //            while(numRead1 > 0 && numRead1 == numRead2);

        System.out.println(i + ", " + numRead1 + ", " + numRead2 + ", " + total + ", " + same + ", "
                + (double) same / total);
    }
}

From source file:au.org.theark.core.service.ArkCommonServiceImpl.java

/**
 * {@inheritDoc}
 */
public void copyArkLargeFileAttachments(String sourceFilePath, String destinationFilePath) throws IOException {
    FileChannel source = null;
    FileChannel destination = null;

    try {
        source = new FileInputStream(new File(sourceFilePath)).getChannel();
        destination = new FileOutputStream(new File(destinationFilePath)).getChannel();

        // This fails with Map Failed exception on large files
        // destination.transferFrom(source, 0, source.size());

        ByteBuffer buf = ByteBuffer.allocateDirect(DEFAULT_BUFFER_SIZE);
        while ((source.read(buf)) != -1) {
            buf.flip();
            destination.write(buf);
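            // Prepare the buffer for the next read (this assumes write() drained it; compact() would preserve bytes left by a partial write).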
            buf.clear();
        }
    } finally {
        if (source != null) {
            source.close();
        }
        if (destination != null) {
            destination.close();
        }
    }
}

From source file:com.castis.sysComp.PoisConverterSysComp.java

private List<subDataDTO> writeNodeInfoOnFile(int byteSize, ByteBuffer byteBuffer,
        GatheringByteChannel outByteCh, InputDataDTO data, String isLeafNode) {

    StringBuffer strBuffer = new StringBuffer();
    strBuffer.append(data.getRegion());
    strBuffer.append("|");
    strBuffer.append(data.getCategory());
    strBuffer.append("|");
    strBuffer.append(data.getWeekday());
    strBuffer.append("|");
    strBuffer.append(data.getHour());
    strBuffer.append("|");
    strBuffer.append(data.getPlatform());
    strBuffer.append("|");
    strBuffer.append(data.getCount());
    List<subDataDTO> subDataList = getSubDataList(data);

    // leafNode or not(Y/N)
    strBuffer.append("|");
    strBuffer.append(isLeafNode);

    strBuffer.append("\r\n");

    byte[] outByte = null;
    try {
        outByte = strBuffer.toString().getBytes("UTF-8");
    } catch (UnsupportedEncodingException e2) {
        e2.printStackTrace();
    }
    byteBuffer.put(outByte);
    byteBuffer.flip();
    try {
        outByteCh.write(byteBuffer);
    } catch (IOException e) {
    }
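    // Reset the buffer so the next record is written from position zero.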
    byteBuffer.clear();

    return subDataList;
}

From source file:org.apache.hadoop.crypto.CryptoStreamsTestBase.java

@Test(timeout = 120000)
public void testCombinedOp() throws Exception {
    OutputStream out = getOutputStream(defaultBufferSize);
    writeData(out);

    final int len1 = dataLen / 8;
    final int len2 = dataLen / 10;

    InputStream in = getInputStream(defaultBufferSize);
    // Read len1 data.
    byte[] readData = new byte[len1];
    readAll(in, readData, 0, len1);
    byte[] expectedData = new byte[len1];
    System.arraycopy(data, 0, expectedData, 0, len1);
    Assert.assertArrayEquals(readData, expectedData);

    long pos = ((Seekable) in).getPos();
    Assert.assertEquals(len1, pos);

    // Seek forward len2
    ((Seekable) in).seek(pos + len2);
    // Skip forward len2
    long n = in.skip(len2);
    Assert.assertEquals(len2, n);

    // Pos: 1/4 dataLen
    positionedReadCheck(in, dataLen / 4);

    // Pos should be len1 + len2 + len2
    pos = ((Seekable) in).getPos();
    Assert.assertEquals(len1 + len2 + len2, pos);

    // Read forward len1
    ByteBuffer buf = ByteBuffer.allocate(len1);
    int nRead = ((ByteBufferReadable) in).read(buf);
    Assert.assertEquals(nRead, buf.position());
    readData = new byte[nRead];
    buf.rewind();
    buf.get(readData);
    expectedData = new byte[nRead];
    System.arraycopy(data, (int) pos, expectedData, 0, nRead);
    Assert.assertArrayEquals(readData, expectedData);

    long lastPos = pos;
    // Pos should be lastPos + nRead
    pos = ((Seekable) in).getPos();
    Assert.assertEquals(lastPos + nRead, pos);

    // Pos: 1/3 dataLen
    positionedReadCheck(in, dataLen / 3);

    // Read forward len1
    readData = new byte[len1];
    readAll(in, readData, 0, len1);
    expectedData = new byte[len1];
    System.arraycopy(data, (int) pos, expectedData, 0, len1);
    Assert.assertArrayEquals(readData, expectedData);

    lastPos = pos;
    // Pos should be lastPos + len1
    pos = ((Seekable) in).getPos();
    Assert.assertEquals(lastPos + len1, pos);

    // Read forward len1
    buf = ByteBuffer.allocate(len1);
    nRead = ((ByteBufferReadable) in).read(buf);
    Assert.assertEquals(nRead, buf.position());
    readData = new byte[nRead];
    buf.rewind();
    buf.get(readData);
    expectedData = new byte[nRead];
    System.arraycopy(data, (int) pos, expectedData, 0, nRead);
    Assert.assertArrayEquals(readData, expectedData);

    lastPos = pos;
    // Pos should be lastPos + nRead
    pos = ((Seekable) in).getPos();
    Assert.assertEquals(lastPos + nRead, pos);

    // ByteBuffer read after EOF
    ((Seekable) in).seek(dataLen);
    buf.clear();
    n = ((ByteBufferReadable) in).read(buf);
    Assert.assertEquals(n, -1);

    in.close();
}

From source file:com.inclouds.hbase.rowcache.RowCache.java

/**
 * CHECKED 2 Prepare key for Get op.
 * 
 * @param buf
 *          the buf
 * @param tableName
 *          the table name
 * @param row
 *          the row
 * @param offset
 *          the offset
 * @param size
 *          the size
 * @param columnFamily
 *          the column family
 * @param column
 *          the column
 */
private void prepareKeyForGet(ByteBuffer buf, byte[] tableName, byte[] row, int offset, int size,
        byte[] columnFamily, byte[] column) {

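    // Start composing the key at position zero (clear() also resets the limit to the capacity).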
    buf.clear();
    int totalSize = 2 + tableName.length + // table
            2 + size + // row
            ((columnFamily != null) ? (2 + columnFamily.length) : 0) + // family
            ((column != null) ? (4 + column.length) : 0); // column
    buf.putInt(totalSize);
    // 4 bytes to keep key length;
    buf.putShort((short) tableName.length);
    buf.put(tableName);
    buf.putShort((short) size);
    buf.put(row, offset, size);
    if (columnFamily != null) {
        buf.putShort((short) columnFamily.length);
        buf.put(columnFamily);
    }
    if (column != null) {
        buf.putInt(column.length);
        buf.put(column);
    }
    // prepare for read
    // buf.flip();

}

From source file:com.inclouds.hbase.rowcache.RowCache.java

/**
 * CHECKED 2 Prepare key for Put op.
 * 
 * @param buf
 *          the buf
 * @param tableName
 *          the table name
 * @param row
 *          the row
 * @param offset
 *          the offset
 * @param size
 *          the size
 * @param columnFamily
 *          the column family
 * @param column
 *          the column
 */
private void prepareKeyForPut(ByteBuffer buf, byte[] tableName, byte[] row, int offset, int size,
        byte[] columnFamily, byte[] column) {

    buf.clear();
    int totalSize = 2 + tableName.length + // table
            2 + size + // row
            ((columnFamily != null) ? (2 + columnFamily.length) : 0) + // family
            ((column != null) ? (4 + column.length) : 0); // column
    buf.putInt(totalSize);
    // 4 bytes to keep key length;
    // skip 4 bytes for Value length
    buf.position(8);
    buf.putShort((short) tableName.length);
    buf.put(tableName);
    buf.putShort((short) size);
    buf.put(row, offset, size);
    if (columnFamily != null) {
        buf.putShort((short) columnFamily.length);
        buf.put(columnFamily);
    }
    if (column != null) {
        buf.putInt(column.length);
        buf.put(column);
    }
    // prepare for read
    // buf.flip();

}

From source file:org.alfresco.contentstore.patch.PatchServiceImpl.java

@Override
public void updatePatchDocument(PatchDocument patchDocument, NodeChecksums checksums, ByteBuffer data) {
    int blockSize = checksums.getBlockSize();

    patchDocument.setBlockSize(blockSize);

    int i = 0;

    Adler32 adlerInfo = new Adler32(hasher);
    int lastMatchIndex = 0;
    ByteBuffer currentPatch = ByteBuffer.allocate(600000); // TODO

    int currentPatchSize = 0;

    for (;;) {
        int chunkSize = 0;
        // determine the size of the next data chuck to evaluate. Default to
        // blockSize, but clamp to end of data
        if ((i + blockSize) > data.limit()) {
            chunkSize = data.limit() - i;
            adlerInfo.reset(); // need to reset this because the rolling
                               // checksum doesn't work correctly on a final
                               // non-aligned block
        } else {
            chunkSize = blockSize;
        }

        int matchedBlock = adlerInfo.checkMatch(lastMatchIndex, checksums, data, i, i + chunkSize - 1);
        if (matchedBlock != -1) {
            //                try
            //                {
            //                    String y = hasher.md5(data, i, i + chunkSize - 1);
            //                    System.out.println("y = " + y);
            //                }
            //                catch (NoSuchAlgorithmException e)
            //                {
            //                    // TODO Auto-generated catch block
            //                    e.printStackTrace();
            //                }
            // if we have a match, do the following:
            // 1) add the matched block index to our tracking buffer
            // 2) check to see if there's a current patch. If so, add it to
            // the patch document.
            // 3) jump forward blockSize bytes and continue
            patchDocument.addMatchedBlock(matchedBlock);

            if (currentPatchSize > 0) {
                // there are outstanding patches, add them to the list
                // create the patch and append it to the patches buffer
                currentPatch.flip();
                int size = currentPatch.limit();
                byte[] dst = new byte[size];
                currentPatch.get(dst, 0, size);
                Patch patch = new Patch(lastMatchIndex, size, dst);
                patchDocument.addPatch(patch);
                currentPatch.clear();
            }

            lastMatchIndex = matchedBlock;

            i += chunkSize;

            adlerInfo.reset();

            continue;
        } else {
            // while we don't have a block match, append bytes to the
            // current patch
            logger.debug("limit = " + currentPatch.limit() + ", position = " + currentPatch.position());
            currentPatch.put(data.get(i));
            currentPatchSize++;
        }
        if (i >= data.limit() - 1) {
            break;
        }
        i++;
    } // end for each byte in the data

    if (currentPatchSize > 0) {
        currentPatch.flip();
        int size = currentPatch.limit();
        byte[] dst = new byte[size];
        currentPatch.get(dst, 0, size);
        Patch patch = new Patch(lastMatchIndex, size, dst);
        patchDocument.addPatch(patch);
    }
}

From source file:com.act.lcms.v2.fullindex.Builder.java

protected void extractTriples(Iterator<LCMSSpectrum> iter, List<MZWindow> windows)
        throws RocksDBException, IOException {
    /* Warning: this method makes heavy use of ByteBuffers to perform memory efficient collection of values and
     * conversion of those values into byte arrays that RocksDB can consume.  If you haven't already, go read this
     * tutorial on ByteBuffers: http://mindprod.com/jgloss/bytebuffer.html
     *
     * ByteBuffers are quite low-level structures, and they use some terms you need to watch out for:
     *   capacity: The total number of bytes in the array backing the buffer.  Don't write more than this.
     *   position: The next index in the buffer to read or write a byte.  Moves with each read or write op.
     *   limit:    A mark of where the final byte in the buffer was written.  Don't read past this.
     *             The remaining() call is affected by the limit.
     *   mark:     Ignore this for now, we don't use it.  (We'll always, always read buffers from 0.)
     *
     * And here are some methods that we'll use often:
     *   clear:     Set position = 0, limit = capacity.  Pretend the buffer is empty, and is ready for more writes.
     *   flip:      Set limit = position, then position = 0.  This remembers how many bytes were written to the buffer
     *              (as the current position), and then puts the position at the beginning.
     *              Always call this after the write before a read.
     *   rewind:    Set position = 0.  Buffer is ready for reading, but unless the limit was set we might not know how
     *              many bytes there are to read.  Always call flip() before rewind().  Can rewind many times to re-read
     *              the buffer repeatedly.
     *   remaining: How many bytes do we have left to read?  Requires an accurate limit value to avoid garbage bytes.
     *   reset:     Don't use this.  It uses the mark, which we don't need currently.
     *
     * Write/read patterns look like:
     *   buffer.clear(); // Clear out anything already in the buffer.
     *   buffer.put(thing1).put(thing2)... // write a bunch of stuff
     *   buffer.flip(); // Prep for reading.  Call *once*!
     *
     *   while (buffer.hasRemaining()) { buffer.get(); } // Read a bunch of stuff.
     *   buffer.rewind(); // Ready for reading again!
     *   while (buffer.hasRemaining()) { buffer.get(); } // Etc.
     *   buffer.clear(); // Forget what was written previously, buffer is ready for reuse.
     *
     * We use byte buffers because they're fast, efficient, and offer incredibly convenient means of serializing a
     * stream of primitive types to their minimal binary representations.  The same operations on objects + object
     * streams require significantly more CPU cycles, consume more memory, and tend to be brittle (i.e. if a class
     * definition changes slightly, serialization may break).  Since the data we're dealing with is pretty simple, we
     * opt for the low-level approach.
     */

    /* Because we'll eventually use the window indices to map a mz range to a list of triples that fall within that
     * range, verify that all of the indices are unique.  If they're not, we'll end up overwriting the data in and
     * corrupting the structure of the index. */
    ensureUniqueMZWindowIndices(windows);

    // For every mz window, allocate a buffer to hold the indices of the triples that fall in that window.
    ByteBuffer[] mzWindowTripleBuffers = new ByteBuffer[windows.size()];
    for (int i = 0; i < mzWindowTripleBuffers.length; i++) {
        /* Note: the mapping between these buffers and their respective mzWindows is purely positional.  Specifically,
         * mzWindows.get(i).getIndex() != i, but mzWindowTripleBuffers[i] belongs to mzWindows.get(i).  We'll map windows
         * indices to the contents of mzWindowTripleBuffers at the very end of this function. */
        mzWindowTripleBuffers[i] = ByteBuffer.allocate(Long.BYTES * 4096); // Start with 4096 longs = 8 pages per window.
    }

    // Every TMzI gets an index which we'll use later when we're querying by m/z and time.
    long counter = -1; // We increment at the top of the loop.
    // Note: we could also write to an mmapped file and just track pointers, but then we might lose out on compression.

    // We allocate all the buffers strictly here, as we know how many bytes a long and a triple will take.  Then reuse!
    ByteBuffer counterBuffer = ByteBuffer.allocate(Long.BYTES);
    ByteBuffer valBuffer = ByteBuffer.allocate(TMzI.BYTES);
    List<Float> timepoints = new ArrayList<>(2000); // We can be sloppy here, as the count is small.

    /* We use a sweep-line approach to scanning through the m/z windows so that we can aggregate all intensities in
     * one pass over the current LCMSSpectrum (this saves us one inner loop in our extraction process).  The m/z
     * values in the LCMSSpectrum become our "critical" or "interesting points" over which we sweep our m/z ranges.
     * The next window in m/z order is guaranteed to be the next one we want to consider since we address the points
     * in m/z order as well.  As soon as we've passed out of the range of one of our windows, we discard it.  It is
     * valid for a window to be added to and discarded from the working queue in one application of the work loop. */
    LinkedList<MZWindow> tbdQueueTemplate = new LinkedList<>(windows); // We can reuse this template to init the sweep.

    int spectrumCounter = 0;
    while (iter.hasNext()) {
        LCMSSpectrum spectrum = iter.next();
        float time = spectrum.getTimeVal().floatValue();

        // This will record all the m/z + intensity readings that correspond to this timepoint.  Exactly sized too!
        ByteBuffer triplesForThisTime = ByteBuffer.allocate(Long.BYTES * spectrum.getIntensities().size());

        // Batch up all the triple writes to reduce the number of times we hit the disk in this loop.
        // Note: huge success!
        RocksDBAndHandles.RocksDBWriteBatch<ColumnFamilies> writeBatch = dbAndHandles.makeWriteBatch();

        // Initialize the sweep line lists.  Windows flow as follows: tbd -> working -> done (nowhere).
        LinkedList<MZWindow> workingQueue = new LinkedList<>();
        LinkedList<MZWindow> tbdQueue = (LinkedList<MZWindow>) tbdQueueTemplate.clone(); // clone is in the docs, so okay!
        for (Pair<Double, Double> mzIntensity : spectrum.getIntensities()) {
            // Very important: increment the counter for every triple.  Otherwise we'll overwrite triples = Very Bad (tm).
            counter++;

            // Brevity = soul of wit!
            Double mz = mzIntensity.getLeft();
            Double intensity = mzIntensity.getRight();

            // Reset the buffers so we end up re-using the few bytes we've allocated.
            counterBuffer.clear(); // Empty (virtually).
            counterBuffer.putLong(counter);
            counterBuffer.flip(); // Prep for reading.

            valBuffer.clear(); // Empty (virtually).
            TMzI.writeToByteBuffer(valBuffer, time, mz, intensity.floatValue());
            valBuffer.flip(); // Prep for reading.

            // First, shift any applicable ranges onto the working queue based on their minimum mz.
            while (!tbdQueue.isEmpty() && tbdQueue.peekFirst().getMin() <= mz) {
                workingQueue.add(tbdQueue.pop());
            }

            // Next, remove any ranges we've passed.
            while (!workingQueue.isEmpty() && workingQueue.peekFirst().getMax() < mz) {
                workingQueue.pop(); // TODO: add() this to a recovery queue which can then become the tbdQueue.  Edge cases!
            }
            /* In the old indexed trace extractor world, we could bail here if there were no target m/z's in our window set
             * that matched with the m/z of our current mzIntensity.  However, since we're now also recording the links
             * between timepoints and their (t, m/z, i) triples, we need to keep on keepin' on regardless of whether we have
             * any m/z windows in the working set right now. */

            // The working queue should now hold only ranges that include this m/z value.  Sweep line swept!

            /* Now add this intensity to the buffers of all the windows in the working queue.  Note that since we're only
             * storing the *index* of the triple, these buffers are going to consume less space than they would if we
             * stored everything together. */
            for (MZWindow window : workingQueue) {
                // TODO: count the number of times we add intensities to each window's accumulator for MS1-style warnings.
                counterBuffer.rewind(); // Already flipped.
                mzWindowTripleBuffers[window.getIndex()] = // Must assign when calling appendOrRealloc.
                        Utils.appendOrRealloc(mzWindowTripleBuffers[window.getIndex()], counterBuffer);
            }

            // We flipped after reading, so we should be good to rewind (to be safe) and write here.
            counterBuffer.rewind();
            valBuffer.rewind();
            writeBatch.put(ColumnFamilies.ID_TO_TRIPLE, Utils.toCompactArray(counterBuffer),
                    Utils.toCompactArray(valBuffer));

            // Rewind again for another read.
            counterBuffer.rewind();
            triplesForThisTime.put(counterBuffer);
        }

        writeBatch.write();

        assert (triplesForThisTime.position() == triplesForThisTime.capacity());

        ByteBuffer timeBuffer = ByteBuffer.allocate(Float.BYTES).putFloat(time);
        timeBuffer.flip(); // Prep both buffers for reading so they can be written to the DB.
        triplesForThisTime.flip();
        dbAndHandles.put(ColumnFamilies.TIMEPOINT_TO_TRIPLES, Utils.toCompactArray(timeBuffer),
                Utils.toCompactArray(triplesForThisTime));

        timepoints.add(time);

        spectrumCounter++;
        if (spectrumCounter % 1000 == 0) {
            LOGGER.info("Extracted %d time spectra", spectrumCounter);
        }
    }
    LOGGER.info("Extracted %d total time spectra", spectrumCounter);

    // Now write all the mzWindow to triple indexes.
    RocksDBAndHandles.RocksDBWriteBatch<ColumnFamilies> writeBatch = dbAndHandles.makeWriteBatch();
    ByteBuffer idBuffer = ByteBuffer.allocate(Integer.BYTES);
    for (int i = 0; i < mzWindowTripleBuffers.length; i++) {
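        // Reuse the same id buffer for every window; clear() resets position and limit before each putInt.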
        idBuffer.clear();
        idBuffer.putInt(windows.get(i).getIndex());
        idBuffer.flip();

        ByteBuffer triplesBuffer = mzWindowTripleBuffers[i];
        triplesBuffer.flip(); // Prep for read.

        writeBatch.put(ColumnFamilies.WINDOW_ID_TO_TRIPLES, Utils.toCompactArray(idBuffer),
                Utils.toCompactArray(triplesBuffer));
    }
    writeBatch.write();

    dbAndHandles.put(ColumnFamilies.TIMEPOINTS, TIMEPOINTS_KEY, Utils.floatListToByteArray(timepoints));
    dbAndHandles.flush(true);
}