Example usage for java.lang Float BYTES

Introduction

On this page you can find usage examples for java.lang.Float.BYTES.

Prototype

public static final int BYTES

Document

The number of bytes used to represent a float value.
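
Float.BYTES was added in Java 8 and equals Float.SIZE / Byte.SIZE, i.e. 4. As a minimal sketch (not taken from any of the sources below), its typical use is sizing a ByteBuffer for a known number of float values:

import java.nio.ByteBuffer;

public class FloatBytesDemo {
    public static void main(String[] args) {
        System.out.println(Float.BYTES); // prints 4

        // Size a buffer for exactly three float values.
        ByteBuffer buf = ByteBuffer.allocate(3 * Float.BYTES);
        buf.putFloat(1.0f).putFloat(2.5f).putFloat(3.5f);
        buf.flip(); // prep for reading
        while (buf.hasRemaining()) {
            System.out.println(buf.getFloat());
        }
    }
}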

Usage

From source file:com.act.lcms.v2.fullindex.Searcher.java

/**
 * Searches an LCMS index for all (time, m/z, intensity) triples within some time and m/z ranges.
 *
 * Note that this method is very much a first-draft/WIP.  There are many opportunities for optimization and
 * improvement here, but this works as an initial attempt.  This method is littered with TODOs, which once TODone
 * should make this a near optimal method of searching through LCMS readings.
 *
 * @param mzRange The range of m/z values for which to search.
 * @param timeRange The time range for which to search.
 * @return A list of (time, m/z, intensity) triples that fall within the specified ranges.
 * @throws RocksDBException
 * @throws ClassNotFoundException
 * @throws IOException
 */
public List<TMzI> searchIndexInRange(Pair<Double, Double> mzRange, Pair<Double, Double> timeRange)
        throws RocksDBException, ClassNotFoundException, IOException {
    // TODO: gracefully handle the case when only one of the two ranges is specified.
    // TODO: consider producing some sort of query plan structure that can be used for optimization/explanation.

    DateTime start = DateTime.now();
    /* Demote the time range to floats, since we know that's how times are stored in the DB.  This tight coupling
     * would normally be a bad thing, but given that this class is necessarily joined at the hip with Builder, it
     * doesn't seem terrible at the moment. */
    Pair<Float, Float> tRangeF = // My kingdom for a functor!
            Pair.of(timeRange.getLeft().floatValue(), timeRange.getRight().floatValue());

    LOGGER.info("Running search for %.6f <= t <= %.6f, %.6f <= m/z <= %.6f", tRangeF.getLeft(),
            tRangeF.getRight(), mzRange.getLeft(), mzRange.getRight());

    // TODO: short circuit these filters.  The first failure after success => no more possible hits.
    List<Float> timesInRange = timepointsInRange(tRangeF);

    byte[][] timeIndexBytes = extractValueBytes(ColumnFamilies.TIMEPOINT_TO_TRIPLES, timesInRange, Float.BYTES,
            ByteBuffer::putFloat);
    // TODO: bail if all the timeIndexBytes lengths are zero.

    List<MZWindow> mzWindowsInRange = mzWindowsInRange(mzRange);

    byte[][] mzIndexBytes = extractValueBytes(ColumnFamilies.WINDOW_ID_TO_TRIPLES, mzWindowsInRange,
            Integer.BYTES, (buff, mz) -> buff.putInt(mz.getIndex()));
    // TODO: bail if all the mzIndexBytes are zero.

    /* TODO: if the number of entries in one range is significantly smaller than the other (an order of magnitude
     * or more), skip extraction of the other set of ids and just filter at the end.  This will be especially helpful
     * when the number of ids in the m/z domain is small, as each time point will probably have >10k ids. */

    LOGGER.info("Found/loaded %d matching time ranges, %d matching m/z ranges", timesInRange.size(),
            mzWindowsInRange.size());

    // TODO: there is no need to union the time indices since they are necessarily distinct.  Just concatenate instead.
    Set<Long> unionTimeIds = unionIdBuffers(timeIndexBytes);
    Set<Long> unionMzIds = unionIdBuffers(mzIndexBytes);
    // TODO: handle the case where one of the sets is empty specially.  Either keep all in the other set or drop all.
    // TODO: we might be able to do this faster by intersecting two sorted lists.
    Set<Long> intersectionIds = new HashSet<>(unionTimeIds);
    /* TODO: this is effectively a hash join, which isn't optimal for sets of wildly different cardinalities.
     * Consider using a sort-merge join instead, which will reduce the object overhead (by a lot) and allow us to
     * pass over the union of the ids from each range just once when joining them.  Additionally, just skip this
     * whole step and filter at the end if one set's size is less than 1k or so and the other is large. */
    intersectionIds.retainAll(unionMzIds);
    LOGGER.info("Id intersection results: t = %d, mz = %d, t ^ mz = %d", unionTimeIds.size(), unionMzIds.size(),
            intersectionIds.size());

    List<Long> idsToFetch = new ArrayList<>(intersectionIds);
    Collections.sort(idsToFetch); // Sort ids so we retrieve them in an order that exploits index locality.

    LOGGER.info("Collecting TMzI triples");
    // Collect all the triples for the ids we extracted.
    // TODO: don't manifest all the bytes: just create a stream of results from the cursor to reduce memory overhead.
    List<TMzI> results = new ArrayList<>(idsToFetch.size());
    byte[][] resultBytes = extractValueBytes(ColumnFamilies.ID_TO_TRIPLE, idsToFetch, Long.BYTES,
            ByteBuffer::putLong);
    for (byte[] tmziBytes : resultBytes) {
        results.add(TMzI.readNextFromByteBuffer(ByteBuffer.wrap(tmziBytes)));
    }

    // TODO: do this filtering inline with the extraction.  We shouldn't have to load all the triples before filtering.
    LOGGER.info("Performing final filtering");
    int preFilterTMzICount = results.size();
    results = results.stream()
            .filter(tmzi -> tmzi.getTime() >= tRangeF.getLeft() && tmzi.getTime() <= tRangeF.getRight()
                    && tmzi.getMz() >= mzRange.getLeft() && tmzi.getMz() <= mzRange.getRight())
            .collect(Collectors.toList());
    LOGGER.info("Precise filtering results: %d -> %d", preFilterTMzICount, results.size());

    DateTime end = DateTime.now();
    LOGGER.info("Search completed in %dms", end.getMillis() - start.getMillis());

    // TODO: return a stream instead that can load the triples lazily.
    return results;
}
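
Note that the extractValueBytes helper this method leans on is not shown on this page. As a rough sketch of the key-encoding idea its (keyBytes, writer) parameters suggest (the name encodeKeys and this standalone shape are hypothetical, not the actual Searcher code), each key is serialized into its own fixed-width byte array through a single reused ByteBuffer:

import java.nio.ByteBuffer;
import java.util.List;
import java.util.function.BiConsumer;

// Hypothetical sketch: serialize each key into a fixed-width byte array.
static <K> byte[][] encodeKeys(List<K> keys, int bytesPerKey, BiConsumer<ByteBuffer, K> writer) {
    byte[][] encoded = new byte[keys.size()][];
    ByteBuffer keyBuffer = ByteBuffer.allocate(bytesPerKey); // one reusable buffer, e.g. Float.BYTES wide
    for (int i = 0; i < keys.size(); i++) {
        keyBuffer.clear();                     // reset for writing
        writer.accept(keyBuffer, keys.get(i)); // e.g. ByteBuffer::putFloat for float time keys
        keyBuffer.flip();                      // prep for reading
        byte[] keyBytes = new byte[keyBuffer.remaining()];
        keyBuffer.get(keyBytes);               // copy out exactly the bytes written
        encoded[i] = keyBytes;
    }
    return encoded;
}

// e.g.: byte[][] timeKeys = encodeKeys(timesInRange, Float.BYTES, ByteBuffer::putFloat);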

From source file:com.act.lcms.v2.fullindex.Builder.java

protected void extractTriples(Iterator<LCMSSpectrum> iter, List<MZWindow> windows)
        throws RocksDBException, IOException {
    /* Warning: this method makes heavy use of ByteBuffers to perform memory efficient collection of values and
     * conversion of those values into byte arrays that RocksDB can consume.  If you haven't already, go read this
     * tutorial on ByteBuffers: http://mindprod.com/jgloss/bytebuffer.html
     *
     * ByteBuffers are quite low-level structures, and they use some terms you need to watch out for:
     *   capacity: The total number of bytes in the array backing the buffer.  Don't write more than this.
     *   position: The next index in the buffer to read or write a byte.  Moves with each read or write op.
     *   limit:    A mark of where the final byte in the buffer was written.  Don't read past this.
     *             The remaining() call is affected by the limit.
     *   mark:     Ignore this for now, we don't use it.  (We'll always, always read buffers from 0.)
     *
     * And here are some methods that we'll use often:
     *   clear:     Set position = 0, limit = capacity.  Pretend the buffer is empty, and is ready for more writes.
     *   flip:      Set limit = position, then position = 0.  This remembers how many bytes were written to the buffer
     *              (as the current position), and then puts the position at the beginning.
     *              Always call this after the write before a read.
     *   rewind:    Set position = 0.  Buffer is ready for reading, but unless the limit was set we might not know how
     *              many bytes there are to read.  Always call flip() before rewind().  Can rewind many times to re-read
     *              the buffer repeatedly.
     *   remaining: How many bytes do we have left to read?  Requires an accurate limit value to avoid garbage bytes.
     *   reset:     Don't use this.  It uses the mark, which we don't need currently.
     *
     * Write/read patterns look like:
     *   buffer.clear(); // Clear out anything already in the buffer.
     *   buffer.put(thing1).put(thing2)... // write a bunch of stuff
     *   buffer.flip(); // Prep for reading.  Call *once*!
     *
     *   while (buffer.hasRemaining()) { buffer.get(); } // Read a bunch of stuff.
     *   buffer.rewind(); // Ready for reading again!
     *   while (buffer.hasRemaining()) { buffer.get(); } // Etc.
     *   buffer.clear(); // Forget what was written previously, buffer is ready for reuse.
     *
     * We use byte buffers because they're fast, efficient, and offer incredibly convenient means of serializing a
     * stream of primitive types to their minimal binary representations.  The same operations on objects + object
     * streams require significantly more CPU cycles, consume more memory, and tend to be brittle (i.e. if a class
     * definition changes slightly, serialization may break).  Since the data we're dealing with is pretty simple, we
     * opt for the low-level approach.
     */

    /* Because we'll eventually use the window indices to map an m/z range to a list of triples that fall within that
     * range, verify that all of the indices are unique.  If they're not, we'll end up overwriting data in the index
     * and corrupting its structure. */
    ensureUniqueMZWindowIndices(windows);

    // For every mz window, allocate a buffer to hold the indices of the triples that fall in that window.
    ByteBuffer[] mzWindowTripleBuffers = new ByteBuffer[windows.size()];
    for (int i = 0; i < mzWindowTripleBuffers.length; i++) {
        /* Note: the mapping between these buffers and their respective mzWindows is purely positional.  Specifically,
         * mzWindows.get(i).getIndex() != i, but mzWindowTripleBuffers[i] belongs to mzWindows.get(i).  We'll map windows
         * indices to the contents of mzWindowTripleBuffers at the very end of this function. */
        mzWindowTripleBuffers[i] = ByteBuffer.allocate(Long.BYTES * 4096); // Start with 4096 longs = 8 pages per window.
    }

    // Every TMzI gets an index which we'll use later when we're querying by m/z and time.
    long counter = -1; // We increment at the top of the loop.
    // Note: we could also write to an mmapped file and just track pointers, but then we might lose out on compression.

    // We allocate all the buffers strictly here, as we know how many bytes a long and a triple will take.  Then reuse!
    ByteBuffer counterBuffer = ByteBuffer.allocate(Long.BYTES);
    ByteBuffer valBuffer = ByteBuffer.allocate(TMzI.BYTES);
    List<Float> timepoints = new ArrayList<>(2000); // We can be sloppy here, as the count is small.

    /* We use a sweep-line approach to scanning through the m/z windows so that we can aggregate all intensities in
     * one pass over the current LCMSSpectrum (this saves us one inner loop in our extraction process).  The m/z
     * values in the LCMSSpectrum become our "critical" or "interesting points" over which we sweep our m/z ranges.
     * The next window in m/z order is guaranteed to be the next one we want to consider since we address the points
     * in m/z order as well.  As soon as we've passed out of the range of one of our windows, we discard it.  It is
     * valid for a window to be added to and discarded from the working queue in one application of the work loop. */
    LinkedList<MZWindow> tbdQueueTemplate = new LinkedList<>(windows); // We can reuse this template to init the sweep.

    int spectrumCounter = 0;
    while (iter.hasNext()) {
        LCMSSpectrum spectrum = iter.next();
        float time = spectrum.getTimeVal().floatValue();

        // This will record all the m/z + intensity readings that correspond to this timepoint.  Exactly sized too!
        ByteBuffer triplesForThisTime = ByteBuffer.allocate(Long.BYTES * spectrum.getIntensities().size());

        // Batch up all the triple writes to reduce the number of times we hit the disk in this loop.
        // Note: huge success!
        RocksDBAndHandles.RocksDBWriteBatch<ColumnFamilies> writeBatch = dbAndHandles.makeWriteBatch();

        // Initialize the sweep line lists.  Windows flow: tbd -> working -> done (i.e. discarded).
        LinkedList<MZWindow> workingQueue = new LinkedList<>();
        LinkedList<MZWindow> tbdQueue = (LinkedList<MZWindow>) tbdQueueTemplate.clone(); // clone is in the docs, so okay!
        for (Pair<Double, Double> mzIntensity : spectrum.getIntensities()) {
            // Very important: increment the counter for every triple.  Otherwise we'll overwrite triples = Very Bad (tm).
            counter++;

            // Brevity = soul of wit!
            Double mz = mzIntensity.getLeft();
            Double intensity = mzIntensity.getRight();

            // Reset the buffers so we end up re-using the few bytes we've allocated.
            counterBuffer.clear(); // Empty (virtually).
            counterBuffer.putLong(counter);
            counterBuffer.flip(); // Prep for reading.

            valBuffer.clear(); // Empty (virtually).
            TMzI.writeToByteBuffer(valBuffer, time, mz, intensity.floatValue());
            valBuffer.flip(); // Prep for reading.

            // First, shift any applicable ranges onto the working queue based on their minimum mz.
            while (!tbdQueue.isEmpty() && tbdQueue.peekFirst().getMin() <= mz) {
                workingQueue.add(tbdQueue.pop());
            }

            // Next, remove any ranges we've passed.
            while (!workingQueue.isEmpty() && workingQueue.peekFirst().getMax() < mz) {
                workingQueue.pop(); // TODO: add() this to a recovery queue which can then become the tbdQueue.  Edge cases!
            }
            /* In the old indexed trace extractor world, we could bail here if there were no target m/z's in our window set
             * that matched with the m/z of our current mzIntensity.  However, since we're now also recording the links
             * between timepoints and their (t, m/z, i) triples, we need to keep on keepin' on regardless of whether we have
             * any m/z windows in the working set right now. */

            // The working queue should now hold only ranges that include this m/z value.  Sweep line swept!

            /* Now add this intensity to the buffers of all the windows in the working queue.  Note that since we're only
             * storing the *index* of the triple, these buffers are going to consume less space than they would if we
             * stored everything together. */
            for (MZWindow window : workingQueue) {
                // TODO: count the number of times we add intensities to each window's accumulator for MS1-style warnings.
                counterBuffer.rewind(); // Already flipped.
                mzWindowTripleBuffers[window.getIndex()] = // Must assign when calling appendOrRealloc.
                        Utils.appendOrRealloc(mzWindowTripleBuffers[window.getIndex()], counterBuffer);
            }

            // We flipped these buffers after writing them, so rewind (to be safe) before reading them into the batch.
            counterBuffer.rewind();
            valBuffer.rewind();
            writeBatch.put(ColumnFamilies.ID_TO_TRIPLE, Utils.toCompactArray(counterBuffer),
                    Utils.toCompactArray(valBuffer));

            // Rewind again for another read.
            counterBuffer.rewind();
            triplesForThisTime.put(counterBuffer);
        }

        writeBatch.write();

        assert (triplesForThisTime.position() == triplesForThisTime.capacity());

        ByteBuffer timeBuffer = ByteBuffer.allocate(Float.BYTES).putFloat(time);
        timeBuffer.flip(); // Prep both buffers for reading so they can be written to the DB.
        triplesForThisTime.flip();
        dbAndHandles.put(ColumnFamilies.TIMEPOINT_TO_TRIPLES, Utils.toCompactArray(timeBuffer),
                Utils.toCompactArray(triplesForThisTime));

        timepoints.add(time);

        spectrumCounter++;
        if (spectrumCounter % 1000 == 0) {
            LOGGER.info("Extracted %d time spectra", spectrumCounter);
        }
    }
    LOGGER.info("Extracted %d total time spectra", spectrumCounter);

    // Now write all the mzWindow-to-triple indexes.
    RocksDBAndHandles.RocksDBWriteBatch<ColumnFamilies> writeBatch = dbAndHandles.makeWriteBatch();
    ByteBuffer idBuffer = ByteBuffer.allocate(Integer.BYTES);
    for (int i = 0; i < mzWindowTripleBuffers.length; i++) {
        idBuffer.clear();
        idBuffer.putInt(windows.get(i).getIndex());
        idBuffer.flip();

        ByteBuffer triplesBuffer = mzWindowTripleBuffers[i];
        triplesBuffer.flip(); // Prep for read.

        writeBatch.put(ColumnFamilies.WINDOW_ID_TO_TRIPLES, Utils.toCompactArray(idBuffer),
                Utils.toCompactArray(triplesBuffer));
    }
    writeBatch.write();

    dbAndHandles.put(ColumnFamilies.TIMEPOINTS, TIMEPOINTS_KEY, Utils.floatListToByteArray(timepoints));
    dbAndHandles.flush(true);
}
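
The clear/put/flip/rewind discipline spelled out in the long comment above can be exercised in isolation. A minimal, self-contained sketch (not part of the Builder source), with the buffer sized via Float.BYTES:

import java.nio.ByteBuffer;

public class BufferReuseDemo {
    public static void main(String[] args) {
        ByteBuffer buf = ByteBuffer.allocate(Float.BYTES * 2);

        buf.clear();                       // ready for writing (position = 0, limit = capacity)
        buf.putFloat(1.5f).putFloat(2.5f); // write
        buf.flip();                        // limit = position, position = 0: ready for reading

        while (buf.hasRemaining()) {       // first read pass
            System.out.println(buf.getFloat());
        }
        buf.rewind();                      // position = 0 again; limit unchanged
        while (buf.hasRemaining()) {       // second read pass over the same bytes
            System.out.println(buf.getFloat());
        }

        buf.clear();                       // forget the contents, reuse the same allocation
        buf.putFloat(3.5f);
    }
}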

From source file:ru.spbspu.viewer.DataView.java

private void forwardActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_forwardActionPerformed
    seekSlider.setValue(
            seekSlider.getValue() + Integer.valueOf(spinnerFrameWidth.getValue().toString()) / Float.BYTES);
    _presenter.changeFrame();
}

From source file:ru.spbspu.viewer.DataView.java

private void backwardActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_backwardActionPerformed
    seekSlider.setValue(
            seekSlider.getValue() - Integer.valueOf(spinnerFrameWidth.getValue().toString()) / Float.BYTES);
    _presenter.changeFrame();
}
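
In both handlers the spinner value is presumably a frame width in bytes, so dividing by Float.BYTES converts it into a count of float samples to move the slider by, along the lines of:

    int samples = frameWidthBytes / Float.BYTES; // hypothetical: 1024 bytes -> 256 samples

Since this is integer division, widths that are not a multiple of Float.BYTES are silently truncated.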

From source file:org.apache.druid.query.aggregation.histogram.ApproximateHistogramAggregatorFactory.java

@Override
public byte[] getCacheKey() {
    byte[] fieldNameBytes = StringUtils.toUtf8(fieldName);
    return ByteBuffer.allocate(1 + fieldNameBytes.length + Integer.BYTES * 2 + Float.BYTES * 2)
            .put(AggregatorUtil.APPROX_HIST_CACHE_TYPE_ID).put(fieldNameBytes).putInt(resolution)
            .putInt(numBuckets).putFloat(lowerLimit).putFloat(upperLimit).array();
}

From source file:org.apache.druid.query.aggregation.HistogramAggregatorFactory.java

@Override
public byte[] getCacheKey() {
    byte[] fieldNameBytes = StringUtils.toUtf8(fieldName);
    ByteBuffer buf = ByteBuffer.allocate(1 + fieldNameBytes.length + Float.BYTES * breaks.length)
            .put(AggregatorUtil.HIST_CACHE_TYPE_ID).put(fieldNameBytes).put((byte) 0xFF);
    buf.asFloatBuffer().put(breaks);

    return buf.array();
}

From source file:org.apache.druid.query.aggregation.HistogramAggregatorFactory.java

@Override
public int getMaxIntermediateSize() {
    return Long.BYTES * (breaks.length + 1) + Float.BYTES * 2;
}
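
As a worked check of this sizing formula: with breaks.length = 10 the factory reserves Long.BYTES * 11 + Float.BYTES * 2 = 88 + 8 = 96 bytes, presumably one long count per bucket (10 breaks partition the line into 11 buckets) plus two floats, likely a running min and max.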

From source file:org.apache.sysml.utils.PersistentLRUCache.java

public ValueWrapper put(String key, float[] value) throws FileNotFoundException, IOException {
    return putImplm(key, new ValueWrapper(new DataWrapper(key, value, this), isInReadOnlyMode),
            value.length * Float.BYTES);
}

From source file:org.apache.sysml.utils.PersistentLRUCache.java

static DataWrapper loadFloatArr(String key, PersistentLRUCache cache)
        throws FileNotFoundException, IOException {
    if (cache.isInReadOnlyMode)
        throw new DMLRuntimeException("Read-only mode is only supported for MatrixBlock.");
    if (!cache.persistedKeys.contains(key))
        throw new DMLRuntimeException("Cannot load the key that has not been persisted: " + key);
    if (PersistentLRUCache.LOG.isDebugEnabled())
        PersistentLRUCache.LOG.debug("Loading float array the key " + key + " from the disk.");
    float[] ret;
    try (ObjectInputStream is = new ObjectInputStream(new FileInputStream(cache.getFilePath(key)))) {
        int size = is.readInt();
        cache.ensureCapacity(size * Float.BYTES);
        ret = new float[size];
        for (int i = 0; i < size; i++) {
            ret[i] = is.readFloat();
        }
    }
    return new DataWrapper(key, ret, cache);
}

From source file:org.apache.sysml.utils.PersistentLRUCache.java

long getSize() {
    if (_dArr != null)
        return _dArr.length * Double.BYTES;
    else if (_fArr != null)
        return _fArr.length * Float.BYTES;
    else if (_mb != null)
        return _mb.getInMemorySize();
    else
        throw new DMLRuntimeException("Not implemented");
}