Example usage for java.nio.ByteBuffer.clear()

List of usage examples for java.nio.ByteBuffer.clear()

Introduction

On this page you can find example usages of java.nio.ByteBuffer.clear().

Prototype

public final Buffer clear() 

Document

Clears this buffer.
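In full: clear() sets the position to zero and the limit to the capacity so the buffer is ready to be filled again; it does not erase the buffer's contents. Below is a minimal sketch of the fill/drain cycle that most of the examples on this page follow. The file name, buffer size, and channel setup are placeholder assumptions for illustration, not code taken from any of the examples.

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.Paths;
import java.nio.file.StandardOpenOption;

public class ClearSketch {
    public static void main(String[] args) throws IOException {
        // "input.bin" is a placeholder path used only for this sketch
        try (FileChannel in = FileChannel.open(Paths.get("input.bin"), StandardOpenOption.READ)) {
            ByteBuffer buffer = ByteBuffer.allocate(4096);
            while (in.read(buffer) != -1) {
                buffer.flip();        // switch from filling to draining
                while (buffer.hasRemaining()) {
                    System.out.print((char) buffer.get());
                }
                buffer.clear();       // position = 0, limit = capacity; data is not erased
            }
        }
    }
}

Some of the examples below (for instance the LISP Map-Notify and Map-Register serializers) also call clear() on a fully written buffer just before returning it, so that the caller sees the whole backing array from position 0 rather than from the position left by the last put.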

Usage

From source file:com.koda.integ.hbase.blockcache.OffHeapBlockCache.java

/**
 * Store external with codec.
 * Format:
 * 0..3   - total record size (-4)
 * 4..7   - size of the key in bytes (16 when hash128 is used)
 * 8..x   - key data
 * x+1    - IN_MEMORY flag (1 = in memory, 0 = not)
 * x+2..  - block data, serialized and compressed
 *
 * @param blockName the block name
 * @param buf the cacheable block to store
 * @param inMemory whether the block is flagged as in-memory
 * @throws IOException Signals that an I/O exception has occurred.
 */
private void storeExternalWithCodec(String blockName, Cacheable buf, boolean inMemory) throws IOException {
    // If external storage is disabled - bail out
    if (!overflowExtEnabled) {
        return;
    }
    byte[] hashed = Utils.hash128(blockName);

    ByteBuffer buffer = extStorageCache.getLocalBufferWithAddress().getBuffer();
    deserializer.set(buf.getDeserializer());

    SerDe serde = extStorageCache.getSerDe();
    Codec codec = extStorageCache.getCompressionCodec();
    buffer.clear();

    buffer.position(4);

    // Save key
    buffer.putInt(hashed.length);
    buffer.put(hashed);
    buffer.put(inMemory ? (byte) 1 : (byte) 0);

    if (buf != null) {
        serde.writeCompressed(buffer, buf, codec);
        int pos = buffer.position();
        buffer.putInt(0, pos - 4);
    }
    buffer.flip();
    StorageHandle handle = storage.storeData(buffer);

    try {
        // we use the hashed byte array as the key
        extStorageCache.put(hashed, handle.toBytes());
    } catch (Exception e) {
        throw new IOException(e);
    }

}

From source file:org.bytesoft.bytetcc.work.CleanupWork.java

private int invokeCompress() throws RuntimeException {
    ByteBuffer current = ByteBuffer.allocate(CONSTANTS_RECORD_SIZE + 1);
    ByteBuffer previou = ByteBuffer.allocate(CONSTANTS_RECORD_SIZE + 1);
    int position = CONSTANTS_START_INDEX;
    for (int index = CONSTANTS_START_INDEX; index < this.endIndex; index += CONSTANTS_RECORD_SIZE + 1) {
        try {
            this.lock.lock();

            this.channel.position(index);
            this.channel.read(current);
            current.flip();
            boolean enabled = 0x1 == current.get();
            if (enabled) {
                if (index != position) {
                    if (!previou.equals(current)) {
                        previou.put((byte) 0x1);
                        previou.put(current);

                        previou.flip();
                        current.flip();

                        this.channel.position(position);
                        this.channel.write(current);

                        previou.flip();
                        current.clear();
                    }

                    this.channel.position(index);
                    ByteBuffer buffer = ByteBuffer.allocate(1);
                    buffer.put((byte) 0x0);
                    this.channel.write(buffer);
                }
                position = index + CONSTANTS_RECORD_SIZE + 1;
            }
        } catch (IOException ex) {
            throw new RuntimeException(ex);
        } finally {
            this.lock.unlock();

            previou.flip();
            current.clear();
        }
    }

    return position;
}

From source file:org.carbondata.core.util.CarbonUtil.java

public static void writeLevelCardinalityFile(String loadFolderLoc, String tableName, int[] dimCardinality)
        throws KettleException {
    String levelCardinalityFilePath = loadFolderLoc + File.separator + CarbonCommonConstants.LEVEL_METADATA_FILE
            + tableName + CarbonCommonConstants.CARBON_METADATA_EXTENSION;
    FileOutputStream fileOutputStream = null;
    FileChannel channel = null;
    try {
        int dimCardinalityArrLength = dimCardinality.length;

        // first four bytes for writing the length of array, remaining for array data
        ByteBuffer buffer = ByteBuffer.allocate(CarbonCommonConstants.INT_SIZE_IN_BYTE
                + dimCardinalityArrLength * CarbonCommonConstants.INT_SIZE_IN_BYTE);

        fileOutputStream = new FileOutputStream(levelCardinalityFilePath);
        channel = fileOutputStream.getChannel();
        buffer.putInt(dimCardinalityArrLength);

        for (int i = 0; i < dimCardinalityArrLength; i++) {
            buffer.putInt(dimCardinality[i]);
        }

        buffer.flip();
        channel.write(buffer);
        buffer.clear();

        LOGGER.info("Level cardinality file written to : " + levelCardinalityFilePath);
    } catch (IOException e) {
        LOGGER.error(
                "Error while writing level cardinality file : " + levelCardinalityFilePath + " " + e.getMessage());
        throw new KettleException("Not able to write level cardinality file", e);
    } finally {
        closeStreams(channel, fileOutputStream);
    }
}

From source file:org.opendaylight.lispflowmapping.lisp.serializer.MapNotifySerializer.java

public ByteBuffer serialize(MapNotify mapNotify) {
    int size = Length.HEADER_SIZE;
    if (mapNotify.getAuthenticationData() != null) {
        size += mapNotify.getAuthenticationData().length;
    }
    if (mapNotify.isXtrSiteIdPresent() != null && mapNotify.isXtrSiteIdPresent()) {
        size += org.opendaylight.lispflowmapping.lisp.serializer.MapRegisterSerializer.Length.XTRID_SIZE
                + org.opendaylight.lispflowmapping.lisp.serializer.MapRegisterSerializer.Length.SITEID_SIZE;
    }
    for (MappingRecordItem mappingRecord : mapNotify.getMappingRecordItem()) {
        size += MappingRecordSerializer.getInstance().getSerializationSize(mappingRecord.getMappingRecord());
    }

    ByteBuffer replyBuffer = ByteBuffer.allocate(size);
    replyBuffer.put((byte) (MessageType.MapNotify.getIntValue() << 4));
    replyBuffer.position(replyBuffer.position() + Length.RES);
    replyBuffer.put(ByteUtil.boolToBit(BooleanUtils.isTrue(mapNotify.isMergeEnabled()), Flags.MERGE_ENABLED));
    if (mapNotify.getMappingRecordItem() != null) {
        replyBuffer.put((byte) mapNotify.getMappingRecordItem().size());
    } else {
        replyBuffer.put((byte) 0);
    }
    replyBuffer.putLong(NumberUtil.asLong(mapNotify.getNonce()));
    replyBuffer.putShort(NumberUtil.asShort(mapNotify.getKeyId()));
    if (mapNotify.getAuthenticationData() != null) {
        replyBuffer.putShort((short) mapNotify.getAuthenticationData().length);
        replyBuffer.put(mapNotify.getAuthenticationData());
    } else {
        replyBuffer.putShort((short) 0);
    }

    if (mapNotify.getMappingRecordItem() != null) {
        for (MappingRecordItem mappingRecord : mapNotify.getMappingRecordItem()) {
            MappingRecordSerializer.getInstance().serialize(replyBuffer, mappingRecord.getMappingRecord());
        }
    }

    if (mapNotify.isXtrSiteIdPresent() != null && mapNotify.isXtrSiteIdPresent()) {
        replyBuffer.put(mapNotify.getXtrId().getValue());
        replyBuffer.put(mapNotify.getSiteId().getValue());
    }
    replyBuffer.clear(); // reset position and limit so the caller can read the buffer from the start
    return replyBuffer;
}

From source file:com.healthmarketscience.jackcess.Table.java

/**
 * Writes a new table defined by the given TableCreator to the database.
 * @usage _advanced_method_
 */
protected static void writeTableDefinition(TableCreator creator) throws IOException {
    // first, create the usage map page
    createUsageMapDefinitionBuffer(creator);

    // next, determine how big the table def will be (in case it will be more
    // than one page)
    JetFormat format = creator.getFormat();
    int idxDataLen = (creator.getIndexCount() * (format.SIZE_INDEX_DEFINITION + format.SIZE_INDEX_COLUMN_BLOCK))
            + (creator.getLogicalIndexCount() * format.SIZE_INDEX_INFO_BLOCK);
    int totalTableDefSize = format.SIZE_TDEF_HEADER
            + (format.SIZE_COLUMN_DEF_BLOCK * creator.getColumns().size()) + idxDataLen
            + format.SIZE_TDEF_TRAILER;

    // total up the amount of space used by the column and index names (2
    // bytes per char + 2 bytes for the length)
    for (Column col : creator.getColumns()) {
        int nameByteLen = (col.getName().length() * JetFormat.TEXT_FIELD_UNIT_SIZE);
        totalTableDefSize += nameByteLen + 2;
    }

    for (IndexBuilder idx : creator.getIndexes()) {
        int nameByteLen = (idx.getName().length() * JetFormat.TEXT_FIELD_UNIT_SIZE);
        totalTableDefSize += nameByteLen + 2;
    }

    // now, create the table definition
    PageChannel pageChannel = creator.getPageChannel();
    ByteBuffer buffer = pageChannel.createBuffer(Math.max(totalTableDefSize, format.PAGE_SIZE));
    writeTableDefinitionHeader(creator, buffer, totalTableDefSize);

    if (creator.hasIndexes()) {
        // index row counts
        IndexData.writeRowCountDefinitions(creator, buffer);
    }

    // column definitions
    Column.writeDefinitions(creator, buffer);

    if (creator.hasIndexes()) {
        // index and index data definitions
        IndexData.writeDefinitions(creator, buffer);
        Index.writeDefinitions(creator, buffer);
    }

    //End of tabledef
    buffer.put((byte) 0xff);
    buffer.put((byte) 0xff);

    // write table buffer to database
    if (totalTableDefSize <= format.PAGE_SIZE) {

        // easy case, fits on one page
        buffer.putShort(format.OFFSET_FREE_SPACE, (short) (buffer.remaining() - 8)); // overwrite page free space
        // Write the tdef page to disk.
        pageChannel.writePage(buffer, creator.getTdefPageNumber());

    } else {

        // need to split across multiple pages
        ByteBuffer partialTdef = pageChannel.createPageBuffer();
        buffer.rewind();
        int nextTdefPageNumber = PageChannel.INVALID_PAGE_NUMBER;
        while (buffer.hasRemaining()) {

            // reset for next write
            partialTdef.clear();

            if (nextTdefPageNumber == PageChannel.INVALID_PAGE_NUMBER) {

                // this is the first page.  note, the first page already has the
                // page header, so no need to write it here
                nextTdefPageNumber = creator.getTdefPageNumber();

            } else {

                // write page header
                writeTablePageHeader(partialTdef);
            }

            // copy the next page of tdef bytes
            int curTdefPageNumber = nextTdefPageNumber;
            int writeLen = Math.min(partialTdef.remaining(), buffer.remaining());
            partialTdef.put(buffer.array(), buffer.position(), writeLen);
            ByteUtil.forward(buffer, writeLen);

            if (buffer.hasRemaining()) {
                // need a next page
                nextTdefPageNumber = pageChannel.allocateNewPage();
                partialTdef.putInt(format.OFFSET_NEXT_TABLE_DEF_PAGE, nextTdefPageNumber);
            }

            // update page free space
            partialTdef.putShort(format.OFFSET_FREE_SPACE, (short) (partialTdef.remaining() - 8)); // overwrite page free space

            // write partial page to disk
            pageChannel.writePage(partialTdef, curTdefPageNumber);
        }

    }
}

From source file:com.flexive.core.stream.BinaryUploadProtocol.java

/**
 * {@inheritDoc}
 */
@Override
public synchronized boolean receiveStream(ByteBuffer buffer) throws IOException {
    if (!buffer.hasRemaining()) {
        //this can only happen on remote clients
        if (LOG.isDebugEnabled())
            LOG.debug("aborting (empty)");
        return false;
    }
    if (!rcvStarted) {
        rcvStarted = true;
        if (LOG.isDebugEnabled())
            LOG.debug("(internal serverside) receive start");
        try {
            pout = getContentStorage().receiveTransitBinary(division, handle, mimeType, expectedLength,
                    timeToLive);
        } catch (SQLException e) {
            LOG.error("SQL Error trying to receive binary stream: " + e.getMessage(), e);
        } catch (FxNotFoundException e) {
            LOG.error("Failed to lookup content storage for division #" + division + ": "
                    + e.getLocalizedMessage());
        }
    }
    if (LOG.isDebugEnabled() && count + buffer.remaining() > expectedLength) {
        LOG.debug("poss. overflow: pos=" + buffer.position() + " lim=" + buffer.limit() + " cap="
                + buffer.capacity());
        LOG.debug("Curr count: " + count + " count+rem="
                + (count + buffer.remaining() + " delta:" + ((count + buffer.remaining()) - expectedLength)));
    }
    count += buffer.remaining();
    pout.write(buffer.array(), buffer.position(), buffer.remaining());
    buffer.clear();
    if (expectedLength > 0 && count >= expectedLength) {
        if (LOG.isDebugEnabled())
            LOG.debug("aborting");
        return false;
    }
    return true;
}

From source file:org.opendaylight.lispflowmapping.implementation.serializer.MapRegisterSerializer.java

public ByteBuffer serialize(MapRegister mapRegister) {
    int size = Length.HEADER_SIZE;
    if (mapRegister.getAuthenticationData() != null) {
        size += mapRegister.getAuthenticationData().length;
    }
    if (mapRegister.isXtrSiteIdPresent() != null && mapRegister.isXtrSiteIdPresent()) {
        size += Length.XTRID_SIZE + Length.SITEID_SIZE;
    }
    for (EidToLocatorRecord eidToLocatorRecord : mapRegister.getEidToLocatorRecord()) {
        size += EidToLocatorRecordSerializer.getInstance().getSerializationSize(eidToLocatorRecord);
    }

    ByteBuffer registerBuffer = ByteBuffer.allocate(size);
    registerBuffer.put((byte) ((byte) (LispMessageEnum.MapRegister.getValue() << 4)
            | ByteUtil.boolToBit(BooleanUtils.isTrue(mapRegister.isProxyMapReply()), Flags.PROXY)
            | ByteUtil.boolToBit(BooleanUtils.isTrue(mapRegister.isXtrSiteIdPresent()), Flags.XTRSITEID)));
    registerBuffer.position(registerBuffer.position() + Length.RES);
    registerBuffer
            .put(ByteUtil.boolToBit(BooleanUtils.isTrue(mapRegister.isWantMapNotify()), Flags.WANT_MAP_REPLY));
    registerBuffer.put((byte) mapRegister.getEidToLocatorRecord().size());
    registerBuffer.putLong(NumberUtil.asLong(mapRegister.getNonce()));
    registerBuffer.putShort(NumberUtil.asShort(mapRegister.getKeyId()));

    if (mapRegister.getAuthenticationData() != null) {
        registerBuffer.putShort((short) mapRegister.getAuthenticationData().length);
        registerBuffer.put(mapRegister.getAuthenticationData());
    } else {
        registerBuffer.putShort((short) 0);
    }
    for (EidToLocatorRecord eidToLocatorRecord : mapRegister.getEidToLocatorRecord()) {
        EidToLocatorRecordSerializer.getInstance().serialize(registerBuffer, eidToLocatorRecord);
    }

    if (mapRegister.isXtrSiteIdPresent() != null && mapRegister.isXtrSiteIdPresent()) {
        registerBuffer.put(mapRegister.getXtrId());
        registerBuffer.put(mapRegister.getSiteId());
    }
    registerBuffer.clear(); // reset position and limit so the caller can read the buffer from the start
    return registerBuffer;
}

From source file:net.librec.data.convertor.appender.SocialDataAppender.java

/**
 * Read data from the data file. Note that duplicated lines are not
 * handled.
 *
 * @param inputDataPath
 *            the path of the data file
 * @throws IOException if I/O error occurs during reading
 */
private void readData(String inputDataPath) throws IOException {
    // Table {row-id, col-id, rate}
    Table<Integer, Integer, Double> dataTable = HashBasedTable.create();
    // Map {col-id, multiple row-id}: used to fast build a rating matrix
    Multimap<Integer, Integer> colMap = HashMultimap.create();
    // BiMap {raw id, inner id} userIds, itemIds
    final List<File> files = new ArrayList<File>();
    final ArrayList<Long> fileSizeList = new ArrayList<Long>();
    SimpleFileVisitor<Path> finder = new SimpleFileVisitor<Path>() {
        @Override
        public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
            fileSizeList.add(file.toFile().length());
            files.add(file.toFile());
            return super.visitFile(file, attrs);
        }
    };
    Files.walkFileTree(Paths.get(inputDataPath), finder);
    long allFileSize = 0;
    for (Long everyFileSize : fileSizeList) {
        allFileSize = allFileSize + everyFileSize.longValue();
    }
    // loop every dataFile collecting from walkFileTree
    for (File dataFile : files) {
        FileInputStream fis = new FileInputStream(dataFile);
        FileChannel fileRead = fis.getChannel();
        ByteBuffer buffer = ByteBuffer.allocate(BSIZE);
        int len;
        String bufferLine = "";
        byte[] bytes = new byte[BSIZE];
        while ((len = fileRead.read(buffer)) != -1) {
            buffer.flip();
            buffer.get(bytes, 0, len);
            bufferLine = bufferLine.concat(new String(bytes, 0, len)).replaceAll("\r", "\n");
            String[] bufferData = bufferLine.split("(\n)+");
            boolean isComplete = bufferLine.endsWith("\n");
            int loopLength = isComplete ? bufferData.length : bufferData.length - 1;
            for (int i = 0; i < loopLength; i++) {
                String line = bufferData[i];
                String[] data = line.trim().split("[ \t,]+");
                String userA = data[0];
                String userB = data[1];
                Double rate = (data.length >= 3) ? Double.valueOf(data[2]) : 1.0;
                if (userIds.containsKey(userA) && userIds.containsKey(userB)) {
                    int row = userIds.get(userA);
                    int col = userIds.get(userB);
                    dataTable.put(row, col, rate);
                    colMap.put(col, row);
                }
            }
            if (!isComplete) {
                bufferLine = bufferData[bufferData.length - 1];
            }
            buffer.clear();
        }
        fileRead.close();
        fis.close();
    }
    int numRows = userIds.size(), numCols = userIds.size();
    // build rating matrix
    userSocialMatrix = new SparseMatrix(numRows, numCols, dataTable, colMap);
    // release memory of data table
    dataTable = null;
}

From source file:io.neba.core.logviewer.Tail.java

@Override
public void run() {
    SeekableByteChannel channel = null;

    try {
        channel = newByteChannel(this.file.toPath(), READ);

        long availableInByte = this.file.length();
        long startingFromInByte = max(availableInByte - this.bytesToTail, 0);

        channel.position(startingFromInByte);

        long position = startingFromInByte;
        long totalBytesRead = 0L;

        // Read up to this amount of data from the file at once.
        ByteBuffer readBuffer = allocate(4096);
        while (!this.stopped) {

            // The file might be temporarily gone during rotation. Wait, then decide
            // whether the file is considered gone permanently or whether a rotation has occurred.
            if (!this.file.exists()) {
                sleep(AWAIT_FILE_ROTATION_MILLIS);
            }
            if (!this.file.exists()) {
                this.remoteEndpoint.sendString("file not found");
                return;
            }

            if (position > this.file.length()) {
                this.remoteEndpoint.sendString("file rotated");
                position = 0;
                closeQuietly(channel);
                channel = newByteChannel(this.file.toPath(), READ);
            }

            int read = channel.read(readBuffer);

            if (read == -1) {
                if (mode == TAIL) {
                    // EOF, we are done.
                    return;
                }
                // If we are in follow mode, reaching the end of the file might signal a file rotation. Sleep and re-try.
                sleep(TAIL_CHECK_INTERVAL_MILLIS);
                continue;
            }

            totalBytesRead += read;

            position = channel.position();
            readBuffer.flip();
            this.remoteEndpoint.sendBytes(readBuffer);
            readBuffer.clear();

            if (mode == TAIL && totalBytesRead >= this.bytesToTail) {
                return;
            }
        }
    } catch (IOException e) {
        this.logger.error("Unable to tail " + this.file.getAbsolutePath() + ".", e);
    } catch (InterruptedException e) {
        if (!this.stopped) {
            this.logger.error("Stopped tailing " + this.file.getAbsolutePath() + ", got interrupted.", e);
        }
    } finally {
        closeQuietly(channel);
    }
}

From source file:voldemort.store.cachestore.impl.ChannelStore.java

private void init(boolean reset) throws IOException {
    if (reset) {
        indexChannel.truncate(OFFSET);
        dataChannel.truncate(OFFSET);
        keyChannel.truncate(OFFSET);
        totalRecord = 0;
    } else {
        long length = indexChannel.size() - OFFSET;
        totalRecord = (int) (length / RECORD_SIZE);
        ByteBuffer buf = ByteBuffer.allocate(RECORD_SIZE);
        logger.info("Building key map and read index file for " + filename + " total record " + totalRecord);
        long per = 0;
        int j = 0;
        if (totalRecord >= 1000000)
            per = totalRecord / 10;

        for (int i = 0; i < totalRecord; i++) {
            indexChannel.read(buf);
            assert (buf.capacity() == RECORD_SIZE);
            buf.rewind();
            byte status = buf.get();
            if (isDeleted(status))
                this.deleted++;
            else {
                long key = buf.getLong();
                byte[] keys;
                try {
                    keys = readChannel(key, keyChannel);
                    long data = buf.getLong();
                    long block2version = buf.getLong();
                    CacheBlock block = new CacheBlock(i, data, block2version, status);
                    map.put(toKey(keys), block);
                } catch (Exception ex) {
                    logger.warn("Not able to read record no " + i + " , skip reason " + ex.getMessage());
                    buf.clear();
                    error++;
                    continue;
                }
            }
            buf.clear();
            if (per > 0 && (i + 1) % per == 0) {
                logger.info((++j * 10) + "% complete");
            }
        }
    }
    dataOffset = dataChannel.size();
    keyOffset = keyChannel.size();
    //logOffset = logChannel.size();
    logger.info("Total record " + totalRecord + " deleted " + deleted + " error " + error + " active "
            + (totalRecord - deleted - error));
}