Example usage for java.nio ByteBuffer putInt

List of usage examples for java.nio ByteBuffer putInt

Introduction

In this page you can find the example usage for java.nio ByteBuffer putInt.

Prototype

public abstract ByteBuffer putInt(int index, int value);

Source Link

Document

Writes the given int to the specified index of this buffer.

Usage

From source file:edu.mbl.jif.imaging.mmtiff.MultipageTiffWriter.java

private void writeIFDEntry(ByteBuffer buffer, CharBuffer cBuffer, char tag, char type, long count, long value)
        throws IOException {
    // A TIFF IFD entry is 12 bytes: tag (2), type (2), count (4), value/offset (4).
    // cBuffer is a char (2-byte) view of buffer, so char indices are byte offsets / 2.
    final int charIndex = bufferPosition_ / 2;
    cBuffer.put(charIndex, tag);
    cBuffer.put(charIndex + 1, type);
    buffer.putInt(bufferPosition_ + 4, (int) count);
    if (type == 3 && count == 1) {
        // Single SHORT value: left-justify it within the 4-byte value field.
        cBuffer.put(charIndex + 4, (char) value);
        cBuffer.put(charIndex + 5, (char) 0);
    } else {
        buffer.putInt(bufferPosition_ + 8, (int) value);
    }
    bufferPosition_ += 12;
}

From source file:org.apache.hadoop.raid.JRSEncoder.java

protected void encodeStripe(InputStream[] blocks, long stripeStartOffset, long blockSize, byte[][] bufs,
        Progressable reporter) throws IOException {

    // Hand the shared output buffer set to every encoder thread up front.
    try {
        for (int i = 0; i < threadNum; i++) {
            fq[i].put(bufs);
        }
    } catch (InterruptedException e) {
        // Do not swallow the interrupt: restore the flag and surface it as an
        // I/O failure so the caller can abort instead of encoding bad data.
        Thread.currentThread().interrupt();
        java.io.InterruptedIOException iioe = new java.io.InterruptedIOException(
                "Interrupted while passing file info to encoder threads");
        iioe.initCause(e);
        throw iioe;
    }

    // Stripe sequence number.
    int s = 0;

    // Number of bytes consumed so far from each source block.
    int read = 0;

    // Packet buffer handed to the encoder threads; layout is
    // [stripeSize * bufSize data][4-byte seq number][1-byte last-flag][padding].
    ByteBuffer buf;

    while (read < blockSize) {
        // True for the final threadNum packets, which must be routed deterministically.
        boolean important = false;

        // Fresh buffer per packet; 64 trailing bytes hold the seq number and flag.
        buf = ByteBuffer.allocate(bufSize * stripeSize + 64);

        // Sequence number lives immediately after the data region.
        buf.putInt(stripeSize * bufSize, s);

        // Flag the packet if it is among the last threadNum packets of the block.
        if ((blockSize - read + bufSize - 1) / bufSize <= threadNum) {
            important = true;
            buf.put(4 + stripeSize * bufSize, (byte) 1);
        } else {
            buf.put(4 + stripeSize * bufSize, (byte) 0);
        }

        byte[] bufarr = buf.array();
        LOG.info("anchor Encode_stripe " + s + " Data_start_reading " + System.nanoTime());
        for (int i = 0; i < stripeSize; i++) {
            try {
                // Read one bufSize chunk from each source block into its slot.
                RaidUtils.readTillEnd(blocks[i], bufarr, true, i * bufSize, bufSize);
            } catch (IOException e) {
                // Best effort as before (slot stays zero-filled), but no longer silent.
                LOG.warn("Failed reading stripe " + s + ", block " + i, e);
            }
        }
        LOG.info("anchor Encode_stripe " + s + " Data_read " + System.nanoTime());

        // Reset position so consumers see the packet from the beginning.
        buf.rewind();

        // Pick the ring buffer with the most free capacity.
        int remain = -1;
        int chosen = -1;
        for (int i = 0; i < threadNum; i++) {
            int rc = q[i].remainingCapacity();
            if (remain < rc) {
                remain = rc;
                chosen = i;
            }
        }

        // The final packets are distributed round-robin so each thread gets one.
        if (important) {
            chosen = (((int) blockSize - read + bufSize - 1) / bufSize - 1) % threadNum;
        }

        try {
            q[chosen].put(buf);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            java.io.InterruptedIOException iioe = new java.io.InterruptedIOException(
                    "Interrupted while queueing stripe " + s);
            iioe.initCause(e);
            throw iioe;
        }
        LOG.info("anchor Encode_stripe " + s + " Data_pushed " + System.nanoTime());

        // Advance to the next stripe.
        s++;
        read += bufSize;
    }
}

From source file:edu.mbl.jif.imaging.mmtiff.MultipageTiffWriter.java

private void writeMMHeaderAndSummaryMD(JSONObject summaryMD) throws IOException {
    // The comment is stored elsewhere, so strip it from the summary metadata.
    if (summaryMD.has("Comment")) {
        summaryMD.remove("Comment");
    }
    final String summaryMDString = summaryMD.toString();
    final int mdLength = summaryMDString.length();

    // 40-byte fixed header:
    //   8 bytes for the TIFF file header (byte-order mark, magic 42, first-IFD offset),
    //   8 bytes each for index-map, display-settings, and comments offset header+offset,
    //   8 bytes for the summary-metadata header and its length.
    ByteBuffer header = ByteBuffer.allocate(40).order(BYTE_ORDER);
    CharBuffer headerChars = header.asCharBuffer();
    headerChars.put(0, BYTE_ORDER.equals(ByteOrder.BIG_ENDIAN) ? (char) 0x4d4d : (char) 0x4949);
    headerChars.put(1, (char) 42);
    header.putInt(4, 40 + mdLength);
    header.putInt(32, SUMMARY_MD_HEADER);
    header.putInt(36, mdLength);

    // Gather-write the fixed header followed by the summary metadata bytes.
    ByteBuffer[] toWrite = { header, ByteBuffer.wrap(getBytesFromString(summaryMDString)) };
    fileChannel_.write(toWrite);
    filePosition_ += header.position() + mdLength;
}

From source file:com.koda.integ.hbase.blockcache.OffHeapBlockCacheOld.java

/**
 * Stores a block in the external (overflow) storage and records its handle
 * in the external storage cache.
 *
 * <p>Buffer layout written to storage: [4-byte payload length][1-byte
 * in-memory flag][serialized block data]. The length prefix is back-filled
 * once serialization has determined the payload size.
 *
 * @param blockName the block name (cache key)
 * @param buf the cacheable block to serialize
 * @param inMemory whether the block should be treated as in-memory on reload
 * @throws IOException if recording the handle in the cache fails
 */
@SuppressWarnings("unused")
private void storeExternal(String blockName, Cacheable buf, boolean inMemory) throws IOException {
    // If external storage is disabled - bail out.
    if (!overflowExtEnabled) {
        return;
    }
    // Skip blocks already present in the external storage cache.
    if (extStorageCache.contains(blockName)) {
        return;
    }

    ByteBuffer buffer = extStorageCache.getLocalBufferWithAddress().getBuffer();
    deserializer.set(buf.getDeserializer());
    buffer.clear();

    // Reserve 4 bytes for the length prefix, then write flag + payload.
    buffer.position(4);
    buffer.put(inMemory ? (byte) 1 : (byte) 0);
    buf.serialize(buffer);
    // Back-fill the payload length (everything after the 4-byte prefix).
    buffer.putInt(0, buffer.position() - 4);

    StorageHandle handle = storage.storeData(buffer);

    try {
        extStorageCache.put(blockName, handle);
    } catch (Exception e) {
        // Wrap cache failures in IOException, preserving the cause.
        throw new IOException(e);
    }

}

From source file:org.apache.hadoop.raid.IAEncoder.java

protected void encodeStripe(InputStream[] blocks, long stripeStartOffset, long blockSize, byte[][] bufs,
        Progressable reporter) throws IOException {

    // Hand the shared output buffer set to every encoder thread up front.
    try {
        for (int i = 0; i < threadNum; i++) {
            fq[i].put(bufs);
        }
    } catch (InterruptedException e) {
        // Do not swallow the interrupt: restore the flag and surface it as an
        // I/O failure so the caller can abort instead of encoding bad data.
        Thread.currentThread().interrupt();
        java.io.InterruptedIOException iioe = new java.io.InterruptedIOException(
                "Interrupted while passing file info to encoder threads");
        iioe.initCause(e);
        throw iioe;
    }

    // Stripe sequence number.
    int s = 0;

    // Number of bytes consumed so far from each source block.
    int read = 0;

    // Packet buffer handed to the encoder threads; layout is
    // [stripeSize * bufSize data][4-byte seq number][1-byte last-flag][padding].
    ByteBuffer buf;

    while (read < blockSize) {
        // True for the final threadNum packets, which must be routed deterministically.
        boolean important = false;

        // Fresh buffer per packet; 64 trailing bytes hold the seq number and flag.
        buf = ByteBuffer.allocate(bufSize * stripeSize + 64);

        // Sequence number lives immediately after the data region.
        buf.putInt(stripeSize * bufSize, s);

        // Flag the packet if it is among the last threadNum packets of the block.
        if ((blockSize - read + bufSize - 1) / bufSize <= threadNum) {
            important = true;
            buf.put(4 + stripeSize * bufSize, (byte) 1);
        } else {
            buf.put(4 + stripeSize * bufSize, (byte) 0);
        }

        byte[] bufarr = buf.array();
        LOG.info("anchor Encode_stripe " + s + " Data_start_reading " + System.nanoTime());
        for (int i = 0; i < stripeSize; i++) {
            try {
                // Read one bufSize chunk from each source block into its slot.
                RaidUtils.readTillEnd(blocks[i], bufarr, true, i * bufSize, bufSize);
            } catch (IOException e) {
                // Best effort as before (slot stays zero-filled), but no longer silent.
                LOG.warn("Failed reading stripe " + s + ", block " + i, e);
            }
        }
        LOG.info("anchor Encode_stripe " + s + " Data_read " + System.nanoTime());

        // Reset position so consumers see the packet from the beginning.
        buf.rewind();

        // Pick the ring buffer with the most free capacity.
        int remain = -1;
        int chosen = -1;
        for (int i = 0; i < threadNum; i++) {
            int rc = q[i].remainingCapacity();
            if (remain < rc) {
                remain = rc;
                chosen = i;
            }
        }

        // The final packets are distributed round-robin so each thread gets one.
        if (important) {
            chosen = (((int) blockSize - read + bufSize - 1) / bufSize - 1) % threadNum;
        }

        try {
            q[chosen].put(buf);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            java.io.InterruptedIOException iioe = new java.io.InterruptedIOException(
                    "Interrupted while queueing stripe " + s);
            iioe.initCause(e);
            throw iioe;
        }
        LOG.info("anchor Encode_stripe " + s + " Data_pushed " + System.nanoTime());

        // Advance to the next stripe.
        s++;
        read += bufSize;
    }
}

From source file:edu.mbl.jif.imaging.mmtiff.MultipageTiffWriter.java

/**
 * Writes ImageJ metadata so composite-mode display settings survive a
 * round trip through ImageJ: the file info text, per-channel display
 * ranges, and per-channel LUTs. Could also be expanded to write ROIs,
 * file info, slice labels, and overlays.
 *
 * <p>Two buffers are produced: a byte-counts buffer (the size of each
 * metadata entry) and the metadata buffer itself. The previously written
 * IFD entries at {@code ijMetadataCountsTagPosition_} and
 * {@code ijMetadataTagPosition_} are then patched to point at them.
 *
 * @param numChannels number of channels to write display ranges and LUTs for
 * @param summaryComment acquisition comment prepended to the file info; may be null or empty
 * @throws IOException if writing to the file channel fails
 */
private void writeImageJMetadata(int numChannels, String summaryComment) throws IOException {
    String info = summaryMDString_;
    if (summaryComment != null && summaryComment.length() > 0) {
        info = "Acquisition comments: \n" + summaryComment + "\n\n\n" + summaryMDString_;
    }
    //size entry (4 bytes) + 4 bytes file info size + 4 bytes for channel display 
    //ranges length + 4 bytes per channel LUT
    int mdByteCountsBufferSize = 4 + 4 + 4 + 4 * numChannels;
    int bufferPosition = 0;

    ByteBuffer mdByteCountsBuffer = ByteBuffer.allocate(mdByteCountsBufferSize).order(BYTE_ORDER);

    //nTypes is number actually written among: fileInfo, slice labels, display ranges, channel LUTS,
    //slice labels, ROI, overlay, and # of extra metadata entries
    int nTypes = 3; //file info, display ranges, and channel LUTs
    int mdBufferSize = 4 + nTypes * 8;

    //Header size: 4 bytes for magic number + 8 bytes for label (int) and count (int) of each type
    mdByteCountsBuffer.putInt(bufferPosition, 4 + nTypes * 8);
    bufferPosition += 4;

    //2 bytes per a character of file info
    mdByteCountsBuffer.putInt(bufferPosition, 2 * info.length());
    bufferPosition += 4;
    mdBufferSize += info.length() * 2;

    //display ranges written as array of doubles (min, max, min, max, etc)
    mdByteCountsBuffer.putInt(bufferPosition, numChannels * 2 * 8);
    bufferPosition += 4;
    mdBufferSize += numChannels * 2 * 8;

    for (int i = 0; i < numChannels; i++) {
        //768 bytes per LUT (256 entries x 3 color components)
        mdByteCountsBuffer.putInt(bufferPosition, 768);
        bufferPosition += 4;
        mdBufferSize += 768;
    }

    //Header (1) File info (1) display ranges (1) LUTS (1 per channel)
    int numMDEntries = 3 + numChannels;
    //Patch the IJMetadataCounts IFD entry: count = numMDEntries, value = offset of counts buffer
    ByteBuffer ifdCountAndValueBuffer = ByteBuffer.allocate(8).order(BYTE_ORDER);
    ifdCountAndValueBuffer.putInt(0, numMDEntries);
    ifdCountAndValueBuffer.putInt(4, (int) filePosition_);
    fileChannel_.write(ifdCountAndValueBuffer, ijMetadataCountsTagPosition_ + 4);

    fileChannel_.write(mdByteCountsBuffer, filePosition_);
    filePosition_ += mdByteCountsBufferSize;

    //Write metadata types and counts
    ByteBuffer mdBuffer = ByteBuffer.allocate(mdBufferSize).order(BYTE_ORDER);
    bufferPosition = 0;

    //All the ints declared below are non public field in TiffDecoder
    final int ijMagicNumber = 0x494a494a; //"IJIJ"
    mdBuffer.putInt(bufferPosition, ijMagicNumber);
    bufferPosition += 4;

    //Write ints for each IJ metadata field and its count
    final int fileInfo = 0x696e666f; //"info"
    mdBuffer.putInt(bufferPosition, fileInfo);
    bufferPosition += 4;
    mdBuffer.putInt(bufferPosition, 1);
    bufferPosition += 4;

    final int displayRanges = 0x72616e67; //"rang"
    mdBuffer.putInt(bufferPosition, displayRanges);
    bufferPosition += 4;
    mdBuffer.putInt(bufferPosition, 1);
    bufferPosition += 4;

    final int luts = 0x6c757473; //"luts"
    mdBuffer.putInt(bufferPosition, luts);
    bufferPosition += 4;
    mdBuffer.putInt(bufferPosition, numChannels);
    bufferPosition += 4;

    //write actual metadata
    //FileInfo: 2 bytes (one char) per character
    for (char c : info.toCharArray()) {
        mdBuffer.putChar(bufferPosition, c);
        bufferPosition += 2;
    }
    try {
        JSONArray channels = masterMPTiffStorage_.getDisplayAndComments().getJSONArray("Channels");
        JSONObject channelSetting;
        for (int i = 0; i < numChannels; i++) {
            channelSetting = channels.getJSONObject(i);
            //Display Ranges: For each channel, write min then max
            mdBuffer.putDouble(bufferPosition, channelSetting.getInt("Min"));
            bufferPosition += 8;
            mdBuffer.putDouble(bufferPosition, channelSetting.getInt("Max"));
            bufferPosition += 8;
        }

        //LUTs: one 768-byte table per channel, derived from color and gamma
        for (int i = 0; i < numChannels; i++) {
            channelSetting = channels.getJSONObject(i);
            LUT lut = ImageUtils.makeLUT(new Color(channelSetting.getInt("Color")),
                    channelSetting.getDouble("Gamma"));
            for (byte b : lut.getBytes()) {
                mdBuffer.put(bufferPosition, b);
                bufferPosition++;
            }
        }
    } catch (JSONException ex) {
        ReportingUtils.logError(
                "Problem with displayAndComments: Couldn't write ImageJ display settings as a result");
    }

    //Patch the IJMetadata IFD entry: count = total byte count, value = offset of metadata buffer
    ifdCountAndValueBuffer = ByteBuffer.allocate(8).order(BYTE_ORDER);
    ifdCountAndValueBuffer.putInt(0, mdBufferSize);
    ifdCountAndValueBuffer.putInt(4, (int) filePosition_);
    fileChannel_.write(ifdCountAndValueBuffer, ijMetadataTagPosition_ + 4);

    fileChannel_.write(mdBuffer, filePosition_);
    filePosition_ += mdBufferSize;
}

From source file:com.koda.integ.hbase.storage.FileExtStorage.java

/**
 * Reads the block referenced by {@code storeHandle} into {@code buf}.
 *
 * Protocol: the first 4 bytes of {@code buf} carry the payload length;
 * a length of 0 signals "not found". On a successful read the block may
 * be re-stored ("second chance") — presumably to migrate hot data out of
 * files about to be evicted; confirm against the storage implementation.
 *
 * @param storeHandle handle identifying the file id, offset, and size of the block
 * @param buf destination buffer; receives [4-byte length][payload], or length 0 if not found
 * @return the original handle, or a new one if the block was re-stored
 */
@Override
public StorageHandle getData(StorageHandle storeHandle, ByteBuffer buf) {
    FileStorageHandle fsh = (FileStorageHandle) storeHandle;

    // Check if current file and offset > currentFileOffset
    int id = maxId.get();
    if (fsh.getId() > id || (fsh.getId() == id && fsh.getOffset() >= currentFileOffset.get())) {
        // not found: the handle points past the data written so far
        buf.putInt(0, 0);
        return fsh;
    }

    RandomAccessFile file = getFile(fsh.getId());//openFile(fsh.getId(), "r");

    boolean needSecondChance = needSecondChance(fsh.getId());

    try {
        if (file == null) {
            // file no longer available: signal not found
            buf.putInt(0, 0);
        } else {
            buf.clear();
            int toRead = fsh.getSize();
            // relative put: writes the length prefix and advances position to 4
            buf.putInt(fsh.getSize());
            buf.limit(4 + toRead);
            try {
                FileChannel fc = file.getChannel();
                int total = 0;
                int c = 0;
                // offset start with overall object length .add +4
                int off = fsh.getOffset() + 4;
                while (total < toRead) {
                    c = fc.read(buf, off);
                    // NOTE(review): off += c runs before the c < 0 check; harmless
                    // today because the loop breaks immediately, but fragile.
                    off += c;
                    if (c < 0) {
                        // end of file before toRead bytes: signal not found
                        buf.putInt(0, 0);
                        break;
                    }
                    total += c;
                }
            } catch (IOException e) {
                // return not found; only report errors for files that should still exist
                if (fsh.getId() > minId.get()) {
                    e.printStackTrace();
                }
                buf.putInt(0, 0);
            }
        }
        // Re-store the block if it was found and its file qualifies for a second chance
        if (buf.getInt(0) != 0 && needSecondChance) {
            // store again
            fsh = (FileStorageHandle) storeData(buf);
        }
        return fsh;

    } finally {
        if (file != null) {
            // return file back
            // PUT we need for old version
            putFile(fsh.getId(), file);
        }
    }

}

From source file:org.apache.tez.mapreduce.examples.RPCLoadGen.java

/**
 * Creates the user payload for the configured delivery mode.
 *
 * <p>Payload layout: bytes [0,4) hold {@code maxSleepTimeMillis}; byte [4]
 * is the mode marker. In RPC mode the full random payload travels inside
 * the UserPayload; in the HDFS modes only the 5-byte header does, and the
 * bulk payload is written to a replicated temp file instead.
 *
 * @param conf Tez configuration used to obtain the FileSystem
 * @param maxSleepTimeMillis encoded into the first 4 bytes of the payload
 * @param payloadSize requested payload size in bytes (raised to 5 for RPC mode)
 * @param mode one of VIA_RPC, VIA_HDFS_DIST_CACHE, VIA_HDFS_DIRECT_READ
 * @param localResources receives the dist-cache resource when applicable
 * @throws IOException if the disk payload cannot be written
 */
private UserPayload createUserPayload(TezConfiguration conf, int maxSleepTimeMillis, int payloadSize,
        String mode, Map<String, LocalResource> localResources) throws IOException {
    ByteBuffer payload;
    if (mode.equals(VIA_RPC)) {
        if (payloadSize < 5) {
            payloadSize = 5; // Need at least 5 bytes to configure the processor
        }
        byte[] payloadBytes = new byte[payloadSize];
        random.nextBytes(payloadBytes);
        payload = ByteBuffer.wrap(payloadBytes);
        payload.put(4, VIA_RPC_BYTE); // mode marker: via RPC
    } else {
        // Only the 5-byte header travels via RPC.
        byte[] payloadBytes = new byte[5];
        payload = ByteBuffer.wrap(payloadBytes);

        // Write the bulk payload to a temp file on HDFS.
        byte[] diskPayload = new byte[payloadSize];
        random.nextBytes(diskPayload);
        fs = FileSystem.get(conf);
        resourcePath = new Path(Path.SEPARATOR + "tmp", DISK_PAYLOAD_NAME);
        resourcePath = fs.makeQualified(resourcePath);
        FSDataOutputStream dataOut = fs.create(resourcePath, true);
        try {
            dataOut.write(diskPayload);
        } finally {
            // Close the stream even if the write fails, so the file handle is not leaked.
            dataOut.close();
        }
        fs.setReplication(resourcePath, (short) 10);
        FileStatus fileStatus = fs.getFileStatus(resourcePath);

        if (mode.equals(VIA_HDFS_DIST_CACHE)) {
            // Was LocalResourceType.ARCHIVE.FILE: a static constant accessed through
            // another constant, which resolves to FILE — written explicitly here.
            LocalResource lr = LocalResource.newInstance(ConverterUtils.getYarnUrlFromPath(resourcePath),
                    LocalResourceType.FILE, LocalResourceVisibility.PRIVATE, fileStatus.getLen(),
                    fileStatus.getModificationTime());
            localResources.put(DISK_PAYLOAD_NAME, lr);
            payload.put(4, VIA_HDFS_DIST_CACHE_BYTE); // mode marker: via dist cache
        } else if (mode.equals(VIA_HDFS_DIRECT_READ)) {
            payload.put(4, VIA_HDFS_DIRECT_READ_BYTE); // mode marker: direct HDFS read
        }
    }

    payload.putInt(0, maxSleepTimeMillis);
    return UserPayload.create(payload);
}

From source file:com.healthmarketscience.jackcess.Table.java

/**
 * Writes a new table defined by the given TableCreator to the database.
 *
 * <p>Builds the complete table definition (header, index row counts, column
 * definitions, index definitions) in a single buffer sized for the whole
 * definition, then writes it either to one page or split across a chain of
 * linked pages when it exceeds the page size.
 *
 * @param creator the table creator describing columns and indexes
 * @throws IOException if writing any page fails
 * @usage _advanced_method_
 */
protected static void writeTableDefinition(TableCreator creator) throws IOException {
    // first, create the usage map page
    createUsageMapDefinitionBuffer(creator);

    // next, determine how big the table def will be (in case it will be more
    // than one page)
    JetFormat format = creator.getFormat();
    int idxDataLen = (creator.getIndexCount() * (format.SIZE_INDEX_DEFINITION + format.SIZE_INDEX_COLUMN_BLOCK))
            + (creator.getLogicalIndexCount() * format.SIZE_INDEX_INFO_BLOCK);
    int totalTableDefSize = format.SIZE_TDEF_HEADER
            + (format.SIZE_COLUMN_DEF_BLOCK * creator.getColumns().size()) + idxDataLen
            + format.SIZE_TDEF_TRAILER;

    // total up the amount of space used by the column and index names (2
    // bytes per char + 2 bytes for the length)
    for (Column col : creator.getColumns()) {
        int nameByteLen = (col.getName().length() * JetFormat.TEXT_FIELD_UNIT_SIZE);
        totalTableDefSize += nameByteLen + 2;
    }

    for (IndexBuilder idx : creator.getIndexes()) {
        int nameByteLen = (idx.getName().length() * JetFormat.TEXT_FIELD_UNIT_SIZE);
        totalTableDefSize += nameByteLen + 2;
    }

    // now, create the table definition
    PageChannel pageChannel = creator.getPageChannel();
    // buffer is at least one page so the single-page case can be written directly
    ByteBuffer buffer = pageChannel.createBuffer(Math.max(totalTableDefSize, format.PAGE_SIZE));
    writeTableDefinitionHeader(creator, buffer, totalTableDefSize);

    if (creator.hasIndexes()) {
        // index row counts
        IndexData.writeRowCountDefinitions(creator, buffer);
    }

    // column definitions
    Column.writeDefinitions(creator, buffer);

    if (creator.hasIndexes()) {
        // index and index data definitions
        IndexData.writeDefinitions(creator, buffer);
        Index.writeDefinitions(creator, buffer);
    }

    // end-of-tabledef marker
    buffer.put((byte) 0xff);
    buffer.put((byte) 0xff);

    // write table buffer to database
    if (totalTableDefSize <= format.PAGE_SIZE) {

        // easy case, fits on one page
        buffer.putShort(format.OFFSET_FREE_SPACE, (short) (buffer.remaining() - 8)); // overwrite page free space
        // Write the tdef page to disk.
        pageChannel.writePage(buffer, creator.getTdefPageNumber());

    } else {

        // need to split across multiple pages, copied page-by-page from buffer
        ByteBuffer partialTdef = pageChannel.createPageBuffer();
        buffer.rewind();
        int nextTdefPageNumber = PageChannel.INVALID_PAGE_NUMBER;
        while (buffer.hasRemaining()) {

            // reset for next write
            partialTdef.clear();

            if (nextTdefPageNumber == PageChannel.INVALID_PAGE_NUMBER) {

                // this is the first page.  note, the first page already has the
                // page header, so no need to write it here
                nextTdefPageNumber = creator.getTdefPageNumber();

            } else {

                // write page header
                writeTablePageHeader(partialTdef);
            }

            // copy the next page of tdef bytes
            int curTdefPageNumber = nextTdefPageNumber;
            int writeLen = Math.min(partialTdef.remaining(), buffer.remaining());
            partialTdef.put(buffer.array(), buffer.position(), writeLen);
            ByteUtil.forward(buffer, writeLen);

            if (buffer.hasRemaining()) {
                // need a next page; allocate it and link it from the current page header
                nextTdefPageNumber = pageChannel.allocateNewPage();
                partialTdef.putInt(format.OFFSET_NEXT_TABLE_DEF_PAGE, nextTdefPageNumber);
            }

            // update page free space
            partialTdef.putShort(format.OFFSET_FREE_SPACE, (short) (partialTdef.remaining() - 8)); // overwrite page free space

            // write partial page to disk
            pageChannel.writePage(partialTdef, curTdefPageNumber);
        }

    }
}