Example usage for java.nio ByteBuffer putInt

Introduction

On this page you can find example usages for java.nio ByteBuffer putInt.

Prototype

public abstract ByteBuffer putInt(int value);

Document

Writes four bytes containing the given int value, in the current byte order, into the buffer at the current position, and then increments the position by four.
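
A minimal, self-contained sketch of that behavior (the class name is illustrative):

import java.nio.ByteBuffer;

public class PutIntDemo {
    public static void main(String[] args) {
        ByteBuffer buf = ByteBuffer.allocate(8);
        buf.putInt(0xCAFEBABE);
        System.out.println(buf.position()); // 4: the position advanced by Integer.BYTES
        // The default byte order is big-endian, so the bytes land as CA FE BA BE.
        System.out.printf("%02X %02X%n", buf.get(0) & 0xFF, buf.get(1) & 0xFF); // CA FE
    }
}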

Usage

From source file:org.opendaylight.netvirt.ipv6service.utils.Ipv6ServiceUtils.java

public byte[] convertIpv6HeaderToByte(Ipv6Header ip6Pdu) {
    byte[] data = new byte[128];
    Arrays.fill(data, (byte) 0);

    ByteBuffer buf = ByteBuffer.wrap(data);
    long flowLabel = (((long) (ip6Pdu.getVersion() & 0x0f) << 28) | (ip6Pdu.getFlowLabel() & 0x0fffffff));
    buf.putInt((int) flowLabel);
    buf.putShort((short) ip6Pdu.getIpv6Length().intValue());
    buf.put((byte) ip6Pdu.getNextHeader().shortValue());
    buf.put((byte) ip6Pdu.getHopLimit().shortValue());
    try {
        byte[] baddr = InetAddress.getByName(ip6Pdu.getSourceIpv6().getValue()).getAddress();
        buf.put(baddr);
        baddr = InetAddress.getByName(ip6Pdu.getDestinationIpv6().getValue()).getAddress();
        buf.put(baddr);
    } catch (UnknownHostException e) {
        LOG.error("convertIpv6HeaderToByte: Failed to serialize src, dest address", e);
    }
    return data;
}
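
The example above packs the version into the top four bits of the first header word, folding the traffic class into its 28-bit flow-label value, before a single putInt. A hedged sketch of packing the fully split RFC 8200 layout (version, traffic class, flow label); all field values here are hypothetical:

import java.nio.ByteBuffer;

public class Ipv6FirstWordDemo {
    public static void main(String[] args) {
        int version = 6;
        int trafficClass = 0;    // hypothetical
        int flowLabel = 0x12345; // hypothetical 20-bit label
        int firstWord = ((version & 0x0F) << 28)
                | ((trafficClass & 0xFF) << 20)
                | (flowLabel & 0x000FFFFF);
        ByteBuffer buf = ByteBuffer.allocate(40); // the fixed IPv6 header is 40 bytes
        buf.putInt(firstWord);
        System.out.printf("0x%08X%n", buf.getInt(0)); // 0x60012345
    }
}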

From source file:com.cinchapi.concourse.server.plugin.PluginManager.java

/**
 * Create a {@link SharedMemory} segment over which the PluginManager will
 * stream real-time {@link Packet packets} that contain writes.
 *
 * @param id the plugin id
 */
private void initRealTimeStream(String id) {
    String streamFile = FileSystem.tempFile();
    SharedMemory stream = new SharedMemory(streamFile);
    ByteBuffer payload = ByteBuffers.fromString(streamFile);
    ByteBuffer message = ByteBuffer.allocate(payload.capacity() + 4);
    message.putInt(Instruction.MESSAGE.ordinal());
    message.put(payload);
    SharedMemory fromServer = (SharedMemory) router.get(id, PluginInfoColumn.FROM_SERVER);
    fromServer.write(ByteBuffers.rewind(message));
    streams.add(stream);
}
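
The allocate-putInt-put sequence above is a common way to frame a message with a 4-byte instruction prefix. A self-contained sketch of the same idea, using plain flip() where the code above uses the Concourse ByteBuffers.rewind() helper; the instruction code is made up:

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

public class FramedMessageDemo {
    public static void main(String[] args) {
        ByteBuffer payload = ByteBuffer.wrap("stream-file-path".getBytes(StandardCharsets.UTF_8));
        ByteBuffer message = ByteBuffer.allocate(payload.remaining() + Integer.BYTES);
        message.putInt(42); // hypothetical instruction ordinal
        message.put(payload);
        message.flip(); // prep the fully written buffer for the consumer
        System.out.println(message.getInt()); // 42
    }
}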

From source file:com.cinchapi.concourse.server.plugin.PluginManager.java

/**
 * Invoke {@code method} that is defined in the plugin endpoint inside of
 * {@code clazz}. The provided {@code creds}, {@code transaction} token and
 * {@code environment} are used to ensure proper alignment with the
 * corresponding client session on the server.
 *
 * @param clazz the {@link Plugin} endpoint class
 * @param method the name of the method to invoke
 * @param args a list of arguments to pass to the method
 * @param creds the {@link AccessToken} submitted to ConcourseServer via the
 *            invokePlugin method
 * @param transaction the {@link TransactionToken} submitted to
 *            ConcourseServer via the invokePlugin method
 * @param environment the environment submitted to ConcourseServer via the
 *            invokePlugin method
 * @return the response from the plugin
 */
public ComplexTObject invoke(String clazz, String method, List<ComplexTObject> args, final AccessToken creds,
        TransactionToken transaction, String environment) {
    SharedMemory fromServer = (SharedMemory) router.get(clazz, PluginInfoColumn.FROM_SERVER);
    RemoteMethodRequest request = new RemoteMethodRequest(method, creds, transaction, environment, args);
    ByteBuffer data0 = Serializables.getBytes(request);
    ByteBuffer data = ByteBuffer.allocate(data0.capacity() + 4);
    data.putInt(Plugin.Instruction.REQUEST.ordinal());
    data.put(data0);
    fromServer.write(ByteBuffers.rewind(data));
    ConcurrentMap<AccessToken, RemoteMethodResponse> fromPluginResponses = (ConcurrentMap<AccessToken, RemoteMethodResponse>) router
            .get(clazz, PluginInfoColumn.FROM_PLUGIN_RESPONSES);
    RemoteMethodResponse response = ConcurrentMaps.waitAndRemove(fromPluginResponses, creds);
    if (!response.isError()) {
        return response.response;
    } else {
        throw Throwables.propagate(response.error);
    }
}
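
On the receiving end, a matching getInt peels the instruction prefix back off before the payload is deserialized. A hedged sketch with a hypothetical Instruction enum (the real Plugin.Instruction constants and their order may differ):

import java.nio.ByteBuffer;

public class FrameReaderDemo {
    enum Instruction { MESSAGE, REQUEST, RESPONSE } // hypothetical ordering

    public static void main(String[] args) {
        ByteBuffer data = ByteBuffer.allocate(8);
        data.putInt(Instruction.REQUEST.ordinal());
        data.putInt(0xBEEF); // stand-in for the serialized request body
        data.flip();
        Instruction instruction = Instruction.values()[data.getInt()];
        System.out.println(instruction); // REQUEST
        // Note: ordinal-based wire codes silently break if enum constants are reordered.
    }
}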

From source file:com.act.lcms.v2.fullindex.Builder.java

protected void extractTriples(Iterator<LCMSSpectrum> iter, List<MZWindow> windows)
        throws RocksDBException, IOException {
    /* Warning: this method makes heavy use of ByteBuffers to perform memory efficient collection of values and
     * conversion of those values into byte arrays that RocksDB can consume.  If you haven't already, go read this
     * tutorial on ByteBuffers: http://mindprod.com/jgloss/bytebuffer.html
     *
     * ByteBuffers are quite low-level structures, and they use some terms you need to watch out for:
     *   capacity: The total number of bytes in the array backing the buffer.  Don't write more than this.
     *   position: The next index in the buffer to read or write a byte.  Moves with each read or write op.
     *   limit:    A mark of where the final byte in the buffer was written.  Don't read past this.
     *             The remaining() call is affected by the limit.
     *   mark:     Ignore this for now, we don't use it.  (We'll always, always read buffers from 0.)
     *
     * And here are some methods that we'll use often:
     *   clear:     Set position = 0, limit = capacity.  Pretend the buffer is empty, and is ready for more writes.
     *   flip:      Set limit = position, then position = 0.  This remembers how many bytes were written to the buffer
     *              (as the current position), and then puts the position at the beginning.
     *              Always call this after the write before a read.
     *   rewind:    Set position = 0.  Buffer is ready for reading, but unless the limit was set we might not know how
     *              many bytes there are to read.  Always call flip() before rewind().  Can rewind many times to re-read
     *              the buffer repeatedly.
     *   remaining: How many bytes do we have left to read?  Requires an accurate limit value to avoid garbage bytes.
     *   reset:     Don't use this.  It uses the mark, which we don't need currently.
     *
     * Write/read patterns look like:
     *   buffer.clear(); // Clear out anything already in the buffer.
     *   buffer.put(thing1).put(thing2)... // write a bunch of stuff
     *   buffer.flip(); // Prep for reading.  Call *once*!
     *
     *   while (buffer.hasRemaining()) { buffer.get(); } // Read a bunch of stuff.
     *   buffer.rewind(); // Ready for reading again!
     *   while (buffer.hasRemaining()) { buffer.get(); } // Etc.
     *   buffer.clear(); // Forget what was written previously, buffer is ready for reuse.
     *
     * We use byte buffers because they're fast, efficient, and offer incredibly convenient means of serializing a
     * stream of primitive types to their minimal binary representations.  The same operations on objects + object
     * streams require significantly more CPU cycles, consume more memory, and tend to be brittle (i.e. if a class
     * definition changes slightly, serialization may break).  Since the data we're dealing with is pretty simple, we
     * opt for the low-level approach.
     */

    /* Because we'll eventually use the window indices to map a mz range to a list of triples that fall within that
     * range, verify that all of the indices are unique.  If they're not, we'll end up overwriting the data in and
     * corrupting the structure of the index. */
    ensureUniqueMZWindowIndices(windows);

    // For every mz window, allocate a buffer to hold the indices of the triples that fall in that window.
    ByteBuffer[] mzWindowTripleBuffers = new ByteBuffer[windows.size()];
    for (int i = 0; i < mzWindowTripleBuffers.length; i++) {
        /* Note: the mapping between these buffers and their respective mzWindows is purely positional.  Specifically,
         * windows.get(i).getIndex() may not equal i, but mzWindowTripleBuffers[i] belongs to windows.get(i).  We'll map
         * window indices to the contents of mzWindowTripleBuffers at the very end of this function. */
        mzWindowTripleBuffers[i] = ByteBuffer.allocate(Long.BYTES * 4096); // Start with 4096 longs = 8 pages per window.
    }

    // Every TMzI gets an index which we'll use later when we're querying by m/z and time.
    long counter = -1; // We increment at the top of the loop.
    // Note: we could also write to an mmapped file and just track pointers, but then we might lose out on compression.

    // We allocate all the buffers strictly here, as we know how many bytes a long and a triple will take.  Then reuse!
    ByteBuffer counterBuffer = ByteBuffer.allocate(Long.BYTES);
    ByteBuffer valBuffer = ByteBuffer.allocate(TMzI.BYTES);
    List<Float> timepoints = new ArrayList<>(2000); // We can be sloppy here, as the count is small.

    /* We use a sweep-line approach to scanning through the m/z windows so that we can aggregate all intensities in
     * one pass over the current LCMSSpectrum (this saves us one inner loop in our extraction process).  The m/z
     * values in the LCMSSpectrum become our "critical" or "interesting points" over which we sweep our m/z ranges.
     * The next window in m/z order is guaranteed to be the next one we want to consider since we address the points
     * in m/z order as well.  As soon as we've passed out of the range of one of our windows, we discard it.  It is
     * valid for a window to be added to and discarded from the working queue in one application of the work loop. */
    LinkedList<MZWindow> tbdQueueTemplate = new LinkedList<>(windows); // We can reuse this template to init the sweep.

    int spectrumCounter = 0;
    while (iter.hasNext()) {
        LCMSSpectrum spectrum = iter.next();
        float time = spectrum.getTimeVal().floatValue();

        // This will record all the m/z + intensity readings that correspond to this timepoint.  Exactly sized too!
        ByteBuffer triplesForThisTime = ByteBuffer.allocate(Long.BYTES * spectrum.getIntensities().size());

        // Batch up all the triple writes to reduce the number of times we hit the disk in this loop.
        // Note: huge success!
        RocksDBAndHandles.RocksDBWriteBatch<ColumnFamilies> writeBatch = dbAndHandles.makeWriteBatch();

        // Initialize the sweep line lists.  Windows flow: tbd -> working -> done (nowhere).
        LinkedList<MZWindow> workingQueue = new LinkedList<>();
        LinkedList<MZWindow> tbdQueue = (LinkedList<MZWindow>) tbdQueueTemplate.clone(); // clone is in the docs, so okay!
        for (Pair<Double, Double> mzIntensity : spectrum.getIntensities()) {
            // Very important: increment the counter for every triple.  Otherwise we'll overwrite triples = Very Bad (tm).
            counter++;

            // Brevity = soul of wit!
            Double mz = mzIntensity.getLeft();
            Double intensity = mzIntensity.getRight();

            // Reset the buffers so we end up re-using the few bytes we've allocated.
            counterBuffer.clear(); // Empty (virtually).
            counterBuffer.putLong(counter);
            counterBuffer.flip(); // Prep for reading.

            valBuffer.clear(); // Empty (virtually).
            TMzI.writeToByteBuffer(valBuffer, time, mz, intensity.floatValue());
            valBuffer.flip(); // Prep for reading.

            // First, shift any applicable ranges onto the working queue based on their minimum mz.
            while (!tbdQueue.isEmpty() && tbdQueue.peekFirst().getMin() <= mz) {
                workingQueue.add(tbdQueue.pop());
            }

            // Next, remove any ranges we've passed.
            while (!workingQueue.isEmpty() && workingQueue.peekFirst().getMax() < mz) {
                workingQueue.pop(); // TODO: add() this to a recovery queue which can then become the tbdQueue.  Edge cases!
            }
            /* In the old indexed trace extractor world, we could bail here if there were no target m/z's in our window set
             * that matched with the m/z of our current mzIntensity.  However, since we're now also recording the links
             * between timepoints and their (t, m/z, i) triples, we need to keep on keepin' on regardless of whether we have
             * any m/z windows in the working set right now. */

            // The working queue should now hold only ranges that include this m/z value.  Sweep line swept!

            /* Now add this intensity to the buffers of all the windows in the working queue.  Note that since we're only
             * storing the *index* of the triple, these buffers are going to consume less space than they would if we
             * stored everything together. */
            for (MZWindow window : workingQueue) {
                // TODO: count the number of times we add intensities to each window's accumulator for MS1-style warnings.
                counterBuffer.rewind(); // Already flipped.
                mzWindowTripleBuffers[window.getIndex()] = // Must assign when calling appendOrRealloc.
                        Utils.appendOrRealloc(mzWindowTripleBuffers[window.getIndex()], counterBuffer);
            }

            // We flipped after writing, so we should be good to rewind (to be safe) and hand these to the DB write.
            counterBuffer.rewind();
            valBuffer.rewind();
            writeBatch.put(ColumnFamilies.ID_TO_TRIPLE, Utils.toCompactArray(counterBuffer),
                    Utils.toCompactArray(valBuffer));

            // Rewind again for another read.
            counterBuffer.rewind();
            triplesForThisTime.put(counterBuffer);
        }

        writeBatch.write();

        assert (triplesForThisTime.position() == triplesForThisTime.capacity());

        ByteBuffer timeBuffer = ByteBuffer.allocate(Float.BYTES).putFloat(time);
        timeBuffer.flip(); // Prep both buffers for reading so they can be written to the DB.
        triplesForThisTime.flip();
        dbAndHandles.put(ColumnFamilies.TIMEPOINT_TO_TRIPLES, Utils.toCompactArray(timeBuffer),
                Utils.toCompactArray(triplesForThisTime));

        timepoints.add(time);

        spectrumCounter++;
        if (spectrumCounter % 1000 == 0) {
            LOGGER.info("Extracted %d time spectra", spectrumCounter);
        }
    }
    LOGGER.info("Extracted %d total time spectra", spectrumCounter);

    // Now write all the mzWindow to triple indexes.
    RocksDBAndHandles.RocksDBWriteBatch<ColumnFamilies> writeBatch = dbAndHandles.makeWriteBatch();
    ByteBuffer idBuffer = ByteBuffer.allocate(Integer.BYTES);
    for (int i = 0; i < mzWindowTripleBuffers.length; i++) {
        idBuffer.clear();
        idBuffer.putInt(windows.get(i).getIndex());
        idBuffer.flip();

        ByteBuffer triplesBuffer = mzWindowTripleBuffers[i];
        triplesBuffer.flip(); // Prep for read.

        writeBatch.put(ColumnFamilies.WINDOW_ID_TO_TRIPLES, Utils.toCompactArray(idBuffer),
                Utils.toCompactArray(triplesBuffer));
    }
    writeBatch.write();

    dbAndHandles.put(ColumnFamilies.TIMEPOINTS, TIMEPOINTS_KEY, Utils.floatListToByteArray(timepoints));
    dbAndHandles.flush(true);
}
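
The clear/flip/rewind lifecycle that the long comment above describes fits in a few lines. A minimal sketch:

import java.nio.ByteBuffer;

public class FlipRewindDemo {
    public static void main(String[] args) {
        ByteBuffer buf = ByteBuffer.allocate(16);
        buf.putLong(1L).putInt(2); // write 12 of the 16 bytes
        buf.flip(); // limit = 12, position = 0: read exactly what was written
        System.out.println(buf.remaining()); // 12
        System.out.println(buf.getLong() + " " + buf.getInt()); // 1 2
        buf.rewind(); // position = 0, limit stays 12: re-read without re-flipping
        System.out.println(buf.getLong()); // 1
        buf.clear(); // position = 0, limit = 16: ready for fresh writes
    }
}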

From source file:net.dv8tion.jda.core.audio.AudioWebSocket.java

private InetSocketAddress handleUdpDiscovery(InetSocketAddress address, int ssrc) {
    //We will now send a packet to discord to punch a port hole in the NAT wall.
    //This is called UDP hole punching.
    try {
        udpSocket = new DatagramSocket(); //Use UDP, not TCP.

        //Create a byte array of length 70 containing our ssrc.
        ByteBuffer buffer = ByteBuffer.allocate(70); //70 taken from https://github.com/Rapptz/discord.py/blob/async/discord/voice_client.py#L208
        buffer.putInt(ssrc); //Put the ssrc that we were given into the packet to send back to discord.

        //Construct our packet to be sent loaded with the byte buffer we store the ssrc in.
        DatagramPacket discoveryPacket = new DatagramPacket(buffer.array(), buffer.array().length, address);
        udpSocket.send(discoveryPacket);

        //Discord responds to our packet, returning a packet containing our external ip and the port we connected through.
        DatagramPacket receivedPacket = new DatagramPacket(new byte[70], 70); //Give a buffer the same size as the one we sent.
        udpSocket.setSoTimeout(1000);
        udpSocket.receive(receivedPacket);

        //The byte array returned by discord containing our external ip and the port that we used
        //to connect to discord with.
        byte[] received = receivedPacket.getData();

        //Example string:"   121.83.253.66                                                   "
        //You'll notice that there are 4 leading nulls and a large amount of nulls between the ip and
        // the last 2 bytes. Not sure why these exist.  The last 2 bytes are the port. More info below.
        String ourIP = new String(receivedPacket.getData());//Puts the entire byte array in. nulls are converted to spaces.
        ourIP = ourIP.substring(4, ourIP.length() - 2); //Removes the SSRC of the answer package and the port that is stuck on the end of this string. (last 2 bytes are the port)
        ourIP = ourIP.trim(); //Removes the extra whitespace(nulls) attached to both sides of the IP

        //The port exists as the last 2 bytes in the packet data, and is encoded as an UNSIGNED short.
        //Furthermore, it is stored in Little Endian instead of normal Big Endian.
        //We will first need to convert the byte order from Little Endian to Big Endian (reverse the order)
        //Then we will need to deal with the fact that the bytes represent an unsigned short.
        //Java cannot deal with unsigned types, so we will have to promote the short to a higher type.
        //Options:  char or int.  I will be doing int because it is just easier to work with.
        byte[] portBytes = new byte[2]; //The port is exactly 2 bytes in size.
        portBytes[0] = received[received.length - 1]; //Get the second byte and store as the first
        portBytes[1] = received[received.length - 2]; //Get the first byte and store as the second.
        //We have now effectively converted from Little Endian -> Big Endian by reversing the order.

        //For more information on how this is converting from an unsigned short to an int refer to:
        //http://www.darksleep.com/player/JavaAndUnsignedTypes.html
        int firstByte = (0x000000FF & ((int) portBytes[0])); //Promotes to int and handles the fact that it was unsigned.
        int secondByte = (0x000000FF & ((int) portBytes[1])); //

        //Combines the 2 bytes back together.
        int ourPort = (firstByte << 8) | secondByte;

        this.address = address;

        return new InetSocketAddress(ourIP, ourPort);
    } catch (SocketException e) {
        return null;
    } catch (IOException e) {
        return null;
    }
}
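
The manual byte swap above can also be delegated to ByteBuffer by declaring the byte order explicitly. A sketch assuming the same 70-byte response layout with the port in the last two bytes; the sample port value is made up:

import java.nio.ByteBuffer;
import java.nio.ByteOrder;

public class PortDecodeDemo {
    public static void main(String[] args) {
        byte[] received = new byte[70];
        received[68] = (byte) 0x39; // low byte first: little-endian 0xE239 = 57913
        received[69] = (byte) 0xE2;
        ByteBuffer buf = ByteBuffer.wrap(received, 68, 2).order(ByteOrder.LITTLE_ENDIAN);
        int port = buf.getShort() & 0xFFFF; // promote the unsigned short to an int
        System.out.println(port); // 57913
    }
}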

From source file:org.kalypso.grid.BinaryGeoGrid.java

/**
 * @param fillGrid
 *          If set to <code>true</code>, the grid will be initially filled with no-data values. Else, the grid values
 *          are undetermined.
 */
public BinaryGeoGrid(final FileChannel channel, final int sizeX, final int sizeY, final int scale,
        final Coordinate origin, final Coordinate offsetX, final Coordinate offsetY, final String sourceCRS,
        final boolean fillGrid) throws GeoGridException {
    super(origin, offsetX, offsetY, sourceCRS);

    m_readBuffer = ByteBuffer.allocate(4 * sizeX * BUFFER_LINES);
    m_readBuffer.order(ByteOrder.BIG_ENDIAN);

    /* create write buffer, also marks this grid as writable */
    m_writeBuffer = ByteBuffer.allocate(4);
    m_writeBuffer.order(ByteOrder.BIG_ENDIAN);

    try {
        m_channel = channel;
        m_binFile = null;

        m_header = new BinaryGeoGridHeader(sizeX, sizeY, scale);

        m_unscaledMin = null;
        m_unscaledMax = null;

        /* Initialize grid */
        // m_randomAccessFile.setLength( HEADER_SIZE + sizeX * sizeY * 4 + 2 * 4 );
        m_channel.truncate(BinaryGeoGridHeader.HEADER_SIZE + sizeX * sizeY * 4 + 2 * 4);

        /* Write header */
        m_channel.position(0);
        m_header.write(m_channel);

        /* Set everything to no-data */
        if (fillGrid) {
            final ByteBuffer buffer = ByteBuffer.allocate(sizeX * 4);
            for (int y = 0; y < sizeY; y++) {
                buffer.clear(); // start a fresh row
                for (int x = 0; x < sizeX; x++)
                    buffer.putInt(NO_DATA);
                buffer.flip(); // without the flip, write() would see zero remaining bytes
                m_channel.write(buffer);
            }
        }

        /* Read statistical data */
        saveStatistically();
    } catch (final IOException e) {
        throw new GeoGridException("Failed to initiate random access file", e);
    }
}
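
A self-contained sketch of the same fill-a-row-then-write pattern, with the flip made explicit; the file name and NO_DATA sentinel are hypothetical:

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.nio.channels.FileChannel;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;

public class RowFillDemo {
    public static void main(String[] args) throws IOException {
        final int NO_DATA = Integer.MIN_VALUE; // hypothetical sentinel
        final int sizeX = 256;
        ByteBuffer row = ByteBuffer.allocate(sizeX * Integer.BYTES).order(ByteOrder.BIG_ENDIAN);
        try (FileChannel channel = FileChannel.open(Path.of("grid.bin"),
                StandardOpenOption.CREATE, StandardOpenOption.WRITE)) {
            for (int y = 0; y < 4; y++) {
                row.clear(); // start a fresh row
                for (int x = 0; x < sizeX; x++) {
                    row.putInt(NO_DATA);
                }
                row.flip(); // expose the just-written bytes to the channel
                channel.write(row);
            }
        }
    }
}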

From source file:org.midonet.netlink.rtnetlink.Link.java

@Override
public int serializeInto(ByteBuffer buf) {
    int start = buf.position();
    buf.put(ifi.family);
    buf.put((byte) 0);
    buf.putShort(ifi.type);
    buf.putInt(ifi.index);
    buf.putInt(ifi.flags);
    buf.putInt(ifi.change);

    if (ifname != null) {
        NetlinkMessage.writeStringAttr(buf, Attr.IFLA_IFNAME, ifname);
    }

    if (mac != null) {
        NetlinkMessage.writeRawAttribute(buf, Attr.IFLA_ADDRESS, mac.getAddress());
    }

    if (mtu > 0) {
        NetlinkMessage.writeIntAttr(buf, Attr.IFLA_MTU, mtu);
    }

    if (masterIndex != 0) {
        NetlinkMessage.writeIntAttr(buf, Attr.IFLA_MASTER, masterIndex);
    }

    if (operstate != OperStatus.IF_OPER_UNKNOWN) {
        NetlinkMessage.writeByteAttr(buf, Attr.IFLA_OPERSTATE, operstate);
    }

    NetlinkMessage.writeAttrNested(buf, Attr.IFLA_LINKINFO, info);

    return buf.position() - start;
}
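
The buf.position() - start idiom above is a simple way for a serializer to report how many bytes it consumed. A self-contained sketch with hypothetical field values, mirroring the fixed 16-byte header written above:

import java.nio.ByteBuffer;

public class PositionDeltaDemo {
    static int serializeInto(ByteBuffer buf) {
        int start = buf.position();
        buf.put((byte) 2);        // family (hypothetical value)
        buf.put((byte) 0);        // padding
        buf.putShort((short) 1);  // type
        buf.putInt(7);            // index
        buf.putInt(0);            // flags
        buf.putInt(0);            // change mask
        return buf.position() - start; // bytes written
    }

    public static void main(String[] args) {
        ByteBuffer buf = ByteBuffer.allocate(64);
        System.out.println(serializeInto(buf)); // 16
    }
}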

From source file:org.apache.geode.pdx.internal.PdxInstanceImpl.java

public byte[] toBytes() {
    PdxReaderImpl ur = getUnmodifiableReader();
    if (ur.getPdxType().getHasDeletedField()) {
        PdxWriterImpl writer = convertToTypeWithNoDeletedFields(ur);
        return writer.toByteArray();
    } else {
        byte[] result = new byte[PdxWriterImpl.HEADER_SIZE + ur.basicSize()];
        ByteBuffer bb = ByteBuffer.wrap(result);
        bb.put(DSCODE.PDX);
        bb.putInt(ur.basicSize());
        bb.putInt(ur.getPdxType().getTypeId());
        ur.basicSendTo(bb);
        return result;
    }
}
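
Here the size can go first because basicSize() is known before serialization starts. When the body size is not known up front, the absolute putInt(index, value) overload can back-patch a length slot after the body is written; a hedged sketch:

import java.nio.ByteBuffer;

public class BackPatchDemo {
    public static void main(String[] args) {
        ByteBuffer bb = ByteBuffer.allocate(64);
        bb.put((byte) 1);   // hypothetical type code
        int sizeSlot = bb.position();
        bb.putInt(0);       // placeholder for the body size
        bb.putInt(12345);   // body...
        bb.putLong(67890L);
        int bodySize = bb.position() - sizeSlot - Integer.BYTES;
        bb.putInt(sizeSlot, bodySize); // absolute write: the position is unchanged
        System.out.println(bb.getInt(sizeSlot)); // 12
    }
}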

From source file:org.apache.tez.runtime.library.common.writers.TestUnorderedPartitionedKVWriter.java

private OutputContext createMockOutputContext(TezCounters counters, ApplicationId appId, String uniqueId) {
    OutputContext outputContext = mock(OutputContext.class);
    doReturn(counters).when(outputContext).getCounters();
    doReturn(appId).when(outputContext).getApplicationId();
    doReturn(1).when(outputContext).getDAGAttemptNumber();
    doReturn("dagName").when(outputContext).getDAGName();
    doReturn("destinationVertexName").when(outputContext).getDestinationVertexName();
    doReturn(1).when(outputContext).getOutputIndex();
    doReturn(1).when(outputContext).getTaskAttemptNumber();
    doReturn(1).when(outputContext).getTaskIndex();
    doReturn(1).when(outputContext).getTaskVertexIndex();
    doReturn("vertexName").when(outputContext).getTaskVertexName();
    doReturn(uniqueId).when(outputContext).getUniqueIdentifier();
    ByteBuffer portBuffer = ByteBuffer.allocate(4);
    portBuffer.mark(); // remember position 0 so reset() can return here after the write
    portBuffer.putInt(SHUFFLE_PORT);
    portBuffer.reset();
    doReturn(portBuffer).when(outputContext)
            .getServiceProviderMetaData(eq(ShuffleUtils.SHUFFLE_HANDLER_SERVICE_ID));
    Path outDirBase = new Path(TEST_ROOT_DIR, "outDir_" + uniqueId);
    String[] outDirs = new String[] { outDirBase.toString() };
    doReturn(outDirs).when(outputContext).getWorkDirs();
    return outputContext;
}
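
The mark()/reset() pair above stages the buffer so consumers read the port from the marked position, much like flip() except that the limit stays at the capacity (fine here because the buffer is exactly sized). A minimal sketch of the pattern; the port value is arbitrary:

import java.nio.ByteBuffer;

public class MarkResetDemo {
    public static void main(String[] args) {
        ByteBuffer portBuffer = ByteBuffer.allocate(Integer.BYTES);
        portBuffer.mark();       // remember position 0
        portBuffer.putInt(4141); // hypothetical shuffle port
        portBuffer.reset();      // jump back to the mark; limit stays at capacity
        System.out.println(portBuffer.getInt()); // 4141
    }
}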