Example usage for java.nio ByteBuffer putShort

List of usage examples for java.nio ByteBuffer putShort

Introduction

On this page you can find example usage for java.nio.ByteBuffer.putShort.

Prototype

public abstract ByteBuffer putShort(int index, short value);

Document

Absolute put method for writing a short value: writes two bytes containing the given short, in the buffer's current byte order, into this buffer at the given index.
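
As a quick orientation before the project examples below, here is a minimal sketch of the absolute put: putShort(index, value) stores two bytes at the given index in the buffer's current byte order (big-endian by default) and leaves the buffer's position unchanged. Class and variable names are illustrative.

import java.nio.ByteBuffer;
import java.nio.ByteOrder;

public class PutShortSketch {
    public static void main(String[] args) {
        ByteBuffer buf = ByteBuffer.allocate(4);

        // Absolute put: the position stays at 0
        buf.putShort(0, (short) 0x1234);
        System.out.println(buf.position()); // 0

        // Default order is big-endian, so the high byte lands first
        System.out.println(Integer.toHexString(buf.get(0) & 0xff)); // 12
        System.out.println(Integer.toHexString(buf.get(1) & 0xff)); // 34

        // Little-endian order reverses the byte layout of subsequent puts
        buf.order(ByteOrder.LITTLE_ENDIAN);
        buf.putShort(2, (short) 0x1234);
        System.out.println(Integer.toHexString(buf.get(2) & 0xff)); // 34
    }
}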

Usage

From source file: Main.java

public static void main(String[] argv) throws Exception {
    ByteBuffer bbuf = ByteBuffer.allocate(10);
    int capacity = bbuf.capacity(); // 10
    System.out.println(capacity);

    // Absolute put: stores the big-endian bytes 0x00 and 0x7B at indices 2 and 3
    bbuf.putShort(2, (short) 123);

    // position is still 0, so compact() copies the whole content onto itself and returns this buffer
    ByteBuffer bb = bbuf.compact();

    System.out.println(Arrays.toString(bb.array())); // [0, 0, 0, 123, 0, 0, 0, 0, 0, 0]
}

From source file: Main.java

public static void putUnsignedShort(ByteBuffer bb, int position, int value) {
    bb.putShort(position, (short) (value & 0xffff));
}
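
The helper above masks the value to its low 16 bits before the narrowing cast, so unsigned values up to 65535 can be stored in a two-byte field. A matching reader, shown here as a hypothetical getUnsignedShort that is not part of the original source, undoes the sign extension the same way:

public static int getUnsignedShort(ByteBuffer bb, int position) {
    // getShort sign-extends the 16-bit value; masking with 0xffff restores the 0..65535 range
    return bb.getShort(position) & 0xffff;
}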

From source file: org.cablelabs.playready.cryptfile.PlayReadyPSSH.java

@Override
public Element generateContentProtection(Document d) throws IOException {
    Element e = super.generateContentProtection(d);

    switch (cpType) {

    case CENC:
        e.appendChild(generateCENCContentProtectionData(d));
        break;

    case MSPRO:
        Element pro = d.createElement(MSPRO_ELEMENT);

        // Generate base64-encoded PRO
        ByteBuffer ba = ByteBuffer.allocate(4);
        ba.order(ByteOrder.LITTLE_ENDIAN);
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        DataOutputStream dos = new DataOutputStream(baos);

        // PlayReady Header Object Size field
        ba.putInt(0, proSize);
        dos.write(ba.array());

        // Number of Records field
        ba.putShort(0, (short) wrmHeaders.size());
        dos.write(ba.array(), 0, 2);

        for (WRMHeader header : wrmHeaders) {

            byte[] wrmData = header.getWRMHeaderData();

            // Record Type (always 1 for WRM Headers)
            ba.putShort(0, (short) 1);
            dos.write(ba.array(), 0, 2);

            // Record Length
            ba.putShort(0, (short) wrmData.length);
            dos.write(ba.array(), 0, 2);

            // Data
            dos.write(wrmData);
        }

        pro.setTextContent(Base64.encodeBase64String(baos.toByteArray()));

        e.appendChild(pro);
        break;
    }

    return e;
}
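
The example reuses a 4-byte little-endian scratch buffer: each absolute putShort(0, ...) overwrites the first two bytes, and only those two bytes are copied to the stream. When the total size is known up front, the same little-endian framing can be produced with one buffer and relative puts. The sketch below mirrors the field layout above with illustrative values; it is not the PlayReady specification.

public static byte[] buildSampleRecord(byte[] wrmData) {
    // 4-byte size + 2-byte record count + 2-byte record type + 2-byte record length + data
    ByteBuffer record = ByteBuffer.allocate(4 + 2 + 2 + 2 + wrmData.length).order(ByteOrder.LITTLE_ENDIAN);
    record.putInt(record.capacity());        // total object size
    record.putShort((short) 1);              // number of records
    record.putShort((short) 1);              // record type
    record.putShort((short) wrmData.length); // record length
    record.put(wrmData);                     // record data
    return record.array();
}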

From source file: io.druid.hll.HyperLogLogCollectorTest.java

@Test
public void testFoldWithUpperNibbleTriggersOffsetChange() throws Exception {
    byte[] arr1 = new byte[HyperLogLogCollector.getLatestNumBytesForDenseStorage()];
    Arrays.fill(arr1, (byte) 0x11);
    ByteBuffer buffer1 = ByteBuffer.wrap(arr1);
    buffer1.put(0, HLLCV1.VERSION);
    buffer1.put(1, (byte) 0);
    buffer1.putShort(2, (short) (2047));
    buffer1.put(HLLCV1.HEADER_NUM_BYTES, (byte) 0x1);

    byte[] arr2 = new byte[HyperLogLogCollector.getLatestNumBytesForDenseStorage()];
    Arrays.fill(arr2, (byte) 0x11);
    ByteBuffer buffer2 = ByteBuffer.wrap(arr2);
    buffer2.put(0, HLLCV1.VERSION);
    buffer2.put(1, (byte) 0);
    buffer2.putShort(2, (short) (2048));

    HyperLogLogCollector collector = HyperLogLogCollector.makeCollector(buffer1);
    collector.fold(buffer2);

    ByteBuffer outBuffer = collector.toByteBuffer();

    Assert.assertEquals(outBuffer.get(), HLLCV1.VERSION);
    Assert.assertEquals(outBuffer.get(), 1);
    Assert.assertEquals(outBuffer.getShort(), 0);
    outBuffer.get();
    outBuffer.getShort();
    Assert.assertFalse(outBuffer.hasRemaining());
}

From source file: org.apache.druid.hll.HyperLogLogCollectorTest.java

@Test
public void testFoldWithUpperNibbleTriggersOffsetChange() {
    byte[] arr1 = new byte[HyperLogLogCollector.getLatestNumBytesForDenseStorage()];
    Arrays.fill(arr1, (byte) 0x11);
    ByteBuffer buffer1 = ByteBuffer.wrap(arr1);
    buffer1.put(0, VersionOneHyperLogLogCollector.VERSION);
    buffer1.put(1, (byte) 0);
    buffer1.putShort(2, (short) (2047));
    buffer1.put(VersionOneHyperLogLogCollector.HEADER_NUM_BYTES, (byte) 0x1);

    byte[] arr2 = new byte[HyperLogLogCollector.getLatestNumBytesForDenseStorage()];
    Arrays.fill(arr2, (byte) 0x11);
    ByteBuffer buffer2 = ByteBuffer.wrap(arr2);
    buffer2.put(0, VersionOneHyperLogLogCollector.VERSION);
    buffer2.put(1, (byte) 0);
    buffer2.putShort(2, (short) (2048));

    HyperLogLogCollector collector = HyperLogLogCollector.makeCollector(buffer1);
    collector.fold(buffer2);

    ByteBuffer outBuffer = collector.toByteBuffer();

    Assert.assertEquals(outBuffer.get(), VersionOneHyperLogLogCollector.VERSION);
    Assert.assertEquals(outBuffer.get(), 1);
    Assert.assertEquals(outBuffer.getShort(), 0);
    outBuffer.get();
    outBuffer.getShort();
    Assert.assertFalse(outBuffer.hasRemaining());
}

From source file: com.github.ambry.utils.UtilsTest.java

@Test
public void testReadBuffers() throws IOException {
    byte[] buf = new byte[40004];
    new Random().nextBytes(buf);
    ByteBuffer inputBuf = ByteBuffer.wrap(buf);
    inputBuf.putInt(0, 40000);
    ByteBuffer outputBuf = Utils.readIntBuffer(new DataInputStream(new ByteBufferInputStream(inputBuf)));
    for (int i = 0; i < 40000; i++) {
        Assert.assertEquals(buf[i + 4], outputBuf.array()[i]);
    }
    // 0 size
    inputBuf.rewind();
    inputBuf.putInt(0, 0);
    outputBuf = Utils.readIntBuffer(new DataInputStream(new ByteBufferInputStream(inputBuf)));
    Assert.assertEquals("Output should be of length 0", 0, outputBuf.array().length);
    // negative size
    inputBuf.rewind();
    inputBuf.putInt(0, -1);
    try {
        Utils.readIntBuffer(new DataInputStream(new ByteBufferInputStream(inputBuf)));
        Assert.fail("Should have encountered exception with negative length.");
    } catch (IllegalArgumentException e) {
    }

    buf = new byte[10];
    new Random().nextBytes(buf);
    inputBuf = ByteBuffer.wrap(buf);
    inputBuf.putShort(0, (short) 8);
    outputBuf = Utils.readShortBuffer(new DataInputStream(new ByteBufferInputStream(inputBuf)));
    for (int i = 0; i < 8; i++) {
        Assert.assertEquals(buf[i + 2], outputBuf.array()[i]);
    }
    // 0 size
    inputBuf.rewind();
    inputBuf.putShort(0, (short) 0);
    outputBuf = Utils.readShortBuffer(new DataInputStream(new ByteBufferInputStream(inputBuf)));
    Assert.assertEquals("Output should be of length 0", 0, outputBuf.array().length);
    // negative size
    inputBuf.rewind();
    inputBuf.putShort(0, (short) -1);
    try {
        Utils.readShortBuffer(new DataInputStream(new ByteBufferInputStream(inputBuf)));
        Assert.fail("Should have encountered exception with negative length.");
    } catch (IllegalArgumentException e) {
    }
}
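
The test patches the length prefix in place with putInt(0, ...) and putShort(0, ...) and then feeds the same bytes to the reader. As an assumption about the reader's shape (not Ambry's actual implementation), a length-prefixed reader could look like this:

public static ByteBuffer readShortBuffer(DataInputStream stream) throws IOException {
    short size = stream.readShort();
    if (size < 0) {
        throw new IllegalArgumentException("Invalid size " + size + " in stream");
    }
    byte[] bytes = new byte[size];
    stream.readFully(bytes);
    return ByteBuffer.wrap(bytes);
}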

From source file: com.healthmarketscience.jackcess.impl.TableImpl.java

/**
 * Updates free space and row info for a new row of the given size in the
 * given data page.  Positions the page for writing the row data.
 * @return the row number of the new row
 * @usage _advanced_method_
 */
public static int addDataPageRow(ByteBuffer dataPage, int rowSize, JetFormat format, int rowFlags) {
    int rowSpaceUsage = getRowSpaceUsage(rowSize, format);

    // Decrease free space record.
    short freeSpaceInPage = dataPage.getShort(format.OFFSET_FREE_SPACE);
    dataPage.putShort(format.OFFSET_FREE_SPACE, (short) (freeSpaceInPage - rowSpaceUsage));

    // Increment row count record.
    short rowCount = dataPage.getShort(format.OFFSET_NUM_ROWS_ON_DATA_PAGE);
    dataPage.putShort(format.OFFSET_NUM_ROWS_ON_DATA_PAGE, (short) (rowCount + 1));

    // determine row position
    short rowLocation = findRowEnd(dataPage, rowCount, format);
    rowLocation -= rowSize;

    // write row position
    dataPage.putShort(getRowStartOffset(rowCount, format), (short) (rowLocation | rowFlags));

    // set position for row data
    dataPage.position(rowLocation);

    return rowCount;
}
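
Both header updates above are absolute read-modify-write operations: getShort at a fixed offset, adjust the value, putShort it back. The idiom in isolation (names are illustrative):

static void adjustShortField(ByteBuffer page, int offset, int delta) {
    short current = page.getShort(offset);
    page.putShort(offset, (short) (current + delta));
}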

From source file: com.healthmarketscience.jackcess.Table.java

/**
 * Writes a new table defined by the given TableCreator to the database.
 * @usage _advanced_method_
 */
protected static void writeTableDefinition(TableCreator creator) throws IOException {
    // first, create the usage map page
    createUsageMapDefinitionBuffer(creator);

    // next, determine how big the table def will be (in case it will be more
    // than one page)
    JetFormat format = creator.getFormat();
    int idxDataLen = (creator.getIndexCount() * (format.SIZE_INDEX_DEFINITION + format.SIZE_INDEX_COLUMN_BLOCK))
            + (creator.getLogicalIndexCount() * format.SIZE_INDEX_INFO_BLOCK);
    int totalTableDefSize = format.SIZE_TDEF_HEADER
            + (format.SIZE_COLUMN_DEF_BLOCK * creator.getColumns().size()) + idxDataLen
            + format.SIZE_TDEF_TRAILER;

    // total up the amount of space used by the column and index names (2
    // bytes per char + 2 bytes for the length)
    for (Column col : creator.getColumns()) {
        int nameByteLen = (col.getName().length() * JetFormat.TEXT_FIELD_UNIT_SIZE);
        totalTableDefSize += nameByteLen + 2;
    }

    for (IndexBuilder idx : creator.getIndexes()) {
        int nameByteLen = (idx.getName().length() * JetFormat.TEXT_FIELD_UNIT_SIZE);
        totalTableDefSize += nameByteLen + 2;
    }

    // now, create the table definition
    PageChannel pageChannel = creator.getPageChannel();
    ByteBuffer buffer = pageChannel.createBuffer(Math.max(totalTableDefSize, format.PAGE_SIZE));
    writeTableDefinitionHeader(creator, buffer, totalTableDefSize);

    if (creator.hasIndexes()) {
        // index row counts
        IndexData.writeRowCountDefinitions(creator, buffer);
    }

    // column definitions
    Column.writeDefinitions(creator, buffer);

    if (creator.hasIndexes()) {
        // index and index data definitions
        IndexData.writeDefinitions(creator, buffer);
        Index.writeDefinitions(creator, buffer);
    }

    //End of tabledef
    buffer.put((byte) 0xff);
    buffer.put((byte) 0xff);

    // write table buffer to database
    if (totalTableDefSize <= format.PAGE_SIZE) {

        // easy case, fits on one page
        buffer.putShort(format.OFFSET_FREE_SPACE, (short) (buffer.remaining() - 8)); // overwrite page free space
        // Write the tdef page to disk.
        pageChannel.writePage(buffer, creator.getTdefPageNumber());

    } else {

        // need to split across multiple pages
        ByteBuffer partialTdef = pageChannel.createPageBuffer();
        buffer.rewind();
        int nextTdefPageNumber = PageChannel.INVALID_PAGE_NUMBER;
        while (buffer.hasRemaining()) {

            // reset for next write
            partialTdef.clear();

            if (nextTdefPageNumber == PageChannel.INVALID_PAGE_NUMBER) {

                // this is the first page.  note, the first page already has the
                // page header, so no need to write it here
                nextTdefPageNumber = creator.getTdefPageNumber();

            } else {

                // write page header
                writeTablePageHeader(partialTdef);
            }

            // copy the next page of tdef bytes
            int curTdefPageNumber = nextTdefPageNumber;
            int writeLen = Math.min(partialTdef.remaining(), buffer.remaining());
            partialTdef.put(buffer.array(), buffer.position(), writeLen);
            ByteUtil.forward(buffer, writeLen);

            if (buffer.hasRemaining()) {
                // need a next page
                nextTdefPageNumber = pageChannel.allocateNewPage();
                partialTdef.putInt(format.OFFSET_NEXT_TABLE_DEF_PAGE, nextTdefPageNumber);
            }

            // update page free space
            partialTdef.putShort(format.OFFSET_FREE_SPACE, (short) (partialTdef.remaining() - 8)); // overwrite page free space

            // write partial page to disk
            pageChannel.writePage(partialTdef, curTdefPageNumber);
        }

    }
}
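
Both branches above finish by overwriting the free-space field with an absolute putShort after the page body has been written with relative puts: write the body first, then backfill the fixed-offset header field. A minimal sketch of that pattern, with illustrative offsets and sizes rather than the Jackcess page format:

static ByteBuffer writePageWithBackfilledHeader(byte[] body) {
    ByteBuffer page = ByteBuffer.allocate(4096);
    page.position(8);                           // reserve an 8-byte header area
    page.put(body);                             // body written with relative puts
    page.putShort(0, (short) page.remaining()); // backfill a free-space field without moving the position
    return page;
}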

From source file: com.healthmarketscience.jackcess.Table.java

/**
 * Delete the row on which the given rowState is currently positioned.
 * <p>
 * Note, this method is not generally meant to be used directly.  You should
 * use the {@link #deleteCurrentRow} method or use the Cursor class, which
 * allows for more complex table interactions.
 * @usage _advanced_method_
 */
public void deleteRow(RowState rowState, RowId rowId) throws IOException {
    requireValidRowId(rowId);

    // ensure that the relevant row state is up-to-date
    ByteBuffer rowBuffer = positionAtRowHeader(rowState, rowId);

    requireNonDeletedRow(rowState, rowId);

    // delete flag always gets set in the "header" row (even if data is on
    // overflow row)
    int pageNumber = rowState.getHeaderRowId().getPageNumber();
    int rowNumber = rowState.getHeaderRowId().getRowNumber();

    // use any read rowValues to help update the indexes
    Object[] rowValues = (!_indexDatas.isEmpty() ? rowState.getRowValues() : null);

    int rowIndex = getRowStartOffset(rowNumber, getFormat());
    rowBuffer.putShort(rowIndex, (short) (rowBuffer.getShort(rowIndex) | DELETED_ROW_MASK | OVERFLOW_ROW_MASK));
    writeDataPage(rowBuffer, pageNumber);

    // update the indexes
    for (IndexData indexData : _indexDatas) {
        indexData.deleteRow(rowValues, rowId);
    }

    // make sure table def gets updated
    updateTableDefinition(-1);
}

From source file: edu.umn.cs.spatialHadoop.nasa.HDFRecordReader.java

/**
 * Recovers all missing entries using a two-dimensional interpolation technique.
 * @param values The dataset that need to be recovered
 * @param fillValue The marker that marks missing values
 * @param waterMask A bit-mask with <code>true</code> values in water areas
 * and <code>false</code> values for land areas.
 */
public static void recoverXYShorts(ByteBuffer values, short fillValue, BitArray waterMask) {
    // Resolution of the dataset which is the size of each of its two dimensions
    // e.g., 1200x1200, 2400x2400, or 4800x4800
    int resolution = (int) Math.sqrt(values.limit() / 2);
    // This array stores all the runs of true (non-fill) values. The size is
    // always even where the two values point to the first and last positions
    // of the run, respectively
    ShortArray[] trueRuns = findTrueRuns(values, fillValue);

    // Now, scan the dataset column by column to recover missing values
    for (short col = 0; col < resolution; col++) {
        // Find runs of fillValues and recover all of them
        short row1 = 0;
        while (row1 < resolution) {
            // Skip as many true values as we can
            while (row1 < resolution && values.getShort(2 * (row1 * resolution + col)) != fillValue)
                row1++;
            // Now, row1 points to the first fillValue
            if (row1 == resolution) {
                // All entries in the column have true values. No processing needed
                continue;
            }
            short row2 = (short) (row1 + 1);
            // Skip as many fillValues as we can
            while (row2 < resolution && values.getShort(2 * (row2 * resolution + col)) == fillValue)
                row2++;
            // Now, row2 points to a true value

            // Offsets of the four true values to the (top, bottom, left, right)
            short[] offsetsToInterpolate = { -1, -1, -1, -1 };
            short[] valuesToInterpolate = new short[4];
            if (row1 > 0) {
                offsetsToInterpolate[0] = (short) (row1 - 1);
                valuesToInterpolate[0] = values.getShort(2 * (offsetsToInterpolate[0] * resolution + col));
            }
            if (row2 < resolution) {
                offsetsToInterpolate[1] = row2;
                valuesToInterpolate[1] = values.getShort(2 * (offsetsToInterpolate[1] * resolution + col));
            }

            for (int row = row1; row < row2; row++) {
                if (values.getShort(2 * (row * resolution + col)) == fillValue
                        && !waterMask.get((row * resolution + col))) {
                    // The point at (row, col) is on land and has a fill (empty) value
                    // Find the position of the run in this row to find points to the left and right
                    int position = -trueRuns[row].binarySearch(col) - 1;
                    if (position > 0) {
                        // There's a true value to the left
                        offsetsToInterpolate[2] = trueRuns[row].get(position - 1);
                        valuesToInterpolate[2] = values
                                .getShort(2 * (row * resolution + offsetsToInterpolate[2]));
                    } else {
                        offsetsToInterpolate[2] = -1;
                    }
                    if (position < trueRuns[row].size()) {
                        // There's a true value to the right
                        offsetsToInterpolate[3] = trueRuns[row].get(position);
                        valuesToInterpolate[3] = values
                                .getShort(2 * (row * resolution + offsetsToInterpolate[3]));
                    } else {
                        offsetsToInterpolate[3] = -1;
                    }
                    short interpolatedValue = interpolatePoint(row, col, offsetsToInterpolate,
                            valuesToInterpolate, fillValue);
                    values.putShort(2 * (row * resolution + col), interpolatedValue);
                }
            }

            // Skip the current empty run and go to the next one
            row1 = row2;
        }
    }
}
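
Every access in the method above converts a (row, col) cell index into a byte offset with 2 * (row * resolution + col), because each short occupies two bytes in the buffer. A pair of helpers makes that addressing explicit (a sketch; the original code inlines the arithmetic):

static short getCell(ByteBuffer grid, int resolution, int row, int col) {
    return grid.getShort(2 * (row * resolution + col));
}

static void setCell(ByteBuffer grid, int resolution, int row, int col, short value) {
    grid.putShort(2 * (row * resolution + col), value);
}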