Example usage for java.nio ByteBuffer rewind

List of usage examples for java.nio ByteBuffer rewind

Introduction

On this page you can find example usages of java.nio ByteBuffer rewind.

Prototype

public final Buffer rewind() 

Document

Rewinds this buffer. The position is set to zero and the mark is discarded.
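
A minimal sketch of the typical put-then-rewind-then-get pattern (class name and values here are illustrative only, not taken from the examples below):

import java.nio.ByteBuffer;

public class RewindDemo {
    public static void main(String[] args) {
        ByteBuffer buf = ByteBuffer.allocate(8);
        buf.putInt(1);
        buf.putInt(2);
        buf.rewind();                     // position back to 0, mark discarded, limit unchanged
        System.out.println(buf.getInt()); // prints 1
        System.out.println(buf.getInt()); // prints 2
    }
}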

Usage

From source file:org.cosmo.common.util.Util.java

public static long shortsToLong2(short[] shorts) {
    ByteBuffer b = ByteBuffer.allocate(8);
    b.putShort(shorts[0]);
    b.putShort(shorts[1]);
    b.putShort(shorts[2]);
    b.putShort(shorts[3]);
    b.rewind();
    return b.getLong();
}
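
For example, a hypothetical call (relying on the buffer's default big-endian byte order):

long packed = Util.shortsToLong2(new short[] { 0x0001, 0x0002, 0x0003, 0x0004 });
// packed == 0x0001000200030004L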

From source file:com.healthmarketscience.jackcess.impl.PageChannel.java

/**
 * Write a page (or part of a page) to disk
 * @param page Page to write
 * @param pageNumber Page number to write the page to
 * @param pageOffset offset within the page at which to start writing the
 *                   page data
 */
public void writePage(ByteBuffer page, int pageNumber, int pageOffset) throws IOException {
    assertWriting();
    validatePageNumber(pageNumber);

    page.rewind().position(pageOffset);

    int writeLen = page.remaining();
    if ((writeLen + pageOffset) > getFormat().PAGE_SIZE) {
        throw new IllegalArgumentException("Page buffer is too large, size " + (writeLen + pageOffset));
    }

    ByteBuffer encodedPage = page;
    if (pageNumber == 0) {
        // re-mask header
        applyHeaderMask(page);
    } else {

        if (!_codecHandler.canEncodePartialPage()) {
            if ((pageOffset > 0) && (writeLen < getFormat().PAGE_SIZE)) {

                // current codec handler cannot encode part of a page, so need to
                // copy the modified part into the current page contents in a temp
                // buffer so that we can encode the entire page
                ByteBuffer fullPage = _fullPageEncodeBufferH.setPage(this, pageNumber);

                // copy the modified part to the full page
                fullPage.position(pageOffset);
                fullPage.put(page);
                fullPage.rewind();

                // reset so we can write the whole page
                page = fullPage;
                pageOffset = 0;
                writeLen = getFormat().PAGE_SIZE;

            } else {

                _fullPageEncodeBufferH.possiblyInvalidate(pageNumber, null);
            }
        }

        // re-encode page
        encodedPage = _codecHandler.encodePage(page, pageNumber, pageOffset);

        // reset position/limit in case they were affected by encoding
        encodedPage.position(pageOffset).limit(pageOffset + writeLen);
    }

    try {
        _channel.write(encodedPage, (getPageOffset(pageNumber) + pageOffset));
    } finally {
        if (pageNumber == 0) {
            // de-mask header
            applyHeaderMask(page);
        }
    }
}

From source file:org.apache.hadoop.hbase.coprocessor.AggregateImplementation.java

/**
 * Gives the row count for the given column family and column qualifier, in
 * the given row range as defined in the Scan object.
 * @throws IOException
 */
@Override
public void getRowNum(RpcController controller, AggregateRequest request, RpcCallback<AggregateResponse> done) {
    AggregateResponse response = null;
    long counter = 0L;
    List<Cell> results = new ArrayList<Cell>();
    InternalScanner scanner = null;
    try {
        Scan scan = ProtobufUtil.toScan(request.getScan());
        byte[][] colFamilies = scan.getFamilies();
        byte[] colFamily = colFamilies != null ? colFamilies[0] : null;
        NavigableSet<byte[]> qualifiers = colFamilies != null ? scan.getFamilyMap().get(colFamily) : null;
        byte[] qualifier = null;
        if (qualifiers != null && !qualifiers.isEmpty()) {
            qualifier = qualifiers.pollFirst();
        }
        if (scan.getFilter() == null && qualifier == null)
            scan.setFilter(new FirstKeyOnlyFilter());
        scanner = env.getRegion().getScanner(scan);
        boolean hasMoreRows = false;
        do {
            hasMoreRows = scanner.next(results);
            if (results.size() > 0) {
                counter++;
            }
            results.clear();
        } while (hasMoreRows);
        ByteBuffer bb = ByteBuffer.allocate(8).putLong(counter);
        bb.rewind();
        response = AggregateResponse.newBuilder().addFirstPart(ByteString.copyFrom(bb)).build();
    } catch (IOException e) {
        ResponseConverter.setControllerException(controller, e);
    } finally {
        if (scanner != null) {
            try {
                scanner.close();
            } catch (IOException ignored) {
            }
        }
    }
    log.info("Row counter from this region is " + env.getRegion().getRegionNameAsString() + ": " + counter);
    done.run(response);
}
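
Note the rewind() after putLong(counter): ByteString.copyFrom(ByteBuffer) copies the buffer's remaining bytes (position to limit), so without the rewind the position would sit at the end of the buffer and nothing would be copied.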

From source file:org.kalypso.grid.BinaryGeoGrid.java

/**
 * @param fillGrid
 *          If set to <code>true</code>, the grid will be initially filled with no-data values. Else, the grid values
 *          are undetermined.
 */
public BinaryGeoGrid(final FileChannel channel, final int sizeX, final int sizeY, final int scale,
        final Coordinate origin, final Coordinate offsetX, final Coordinate offsetY, final String sourceCRS,
        final boolean fillGrid) throws GeoGridException {
    super(origin, offsetX, offsetY, sourceCRS);

    m_readBuffer = ByteBuffer.allocate(4 * sizeX * BUFFER_LINES);
    m_readBuffer.order(ByteOrder.BIG_ENDIAN);

    /* create write buffer, also marks this grid as writable */
    m_writeBuffer = ByteBuffer.allocate(4);
    m_writeBuffer.order(ByteOrder.BIG_ENDIAN);

    try {
        m_channel = channel;
        m_binFile = null;

        m_header = new BinaryGeoGridHeader(sizeX, sizeY, scale);

        m_unscaledMin = null;
        m_unscaledMax = null;

        /* Initialize grid */
        // m_randomAccessFile.setLength( HEADER_SIZE + sizeX * sizeY * 4 + 2 * 4 );
        m_channel.truncate(BinaryGeoGridHeader.HEADER_SIZE + sizeX * sizeY * 4 + 2 * 4);

        /* Write header */
        m_channel.position(0);
        m_header.write(m_channel);

        /* Set everything to non-data */
        if (fillGrid) {
            final ByteBuffer buffer = ByteBuffer.allocate(sizeX * 4);
            for (int y = 0; y < sizeY; y++) {
                buffer.rewind();
                for (int x = 0; x < sizeX; x++)
                    buffer.putInt(NO_DATA);
                // rewind again so the filled line is written from position zero
                buffer.rewind();
                m_channel.write(buffer);
            }
        }

        /* Save statistical data */
        saveStatistically();
    } catch (final IOException e) {
        throw new GeoGridException("Failed to initiate random access file", e);
    }
}

From source file:org.apache.hadoop.hbase.coprocessor.AggregateImplementation.java

/**
 * Gives a Pair with first object as Sum and second object as row count,
 * computed for a given combination of column qualifier and column family in
 * the given row range as defined in the Scan object. In its current
 * implementation, it takes one column family and one column qualifier (if
 * provided). In case of a null column qualifier, an aggregate sum over the
 * entire column family will be returned.
 * <p>
 * The average is computed in
 * AggregationClient#avg(byte[], ColumnInterpreter, Scan) by
 * processing results from all regions, so it is "ok" to pass the sum and a Long
 * type.
 */
@Override
public void getAvg(RpcController controller, AggregateRequest request, RpcCallback<AggregateResponse> done) {
    AggregateResponse response = null;
    InternalScanner scanner = null;
    try {
        ColumnInterpreter<T, S, P, Q, R> ci = constructColumnInterpreterFromRequest(request);
        S sumVal = null;
        Long rowCountVal = 0L;
        Scan scan = ProtobufUtil.toScan(request.getScan());
        scanner = env.getRegion().getScanner(scan);
        byte[] colFamily = scan.getFamilies()[0];
        NavigableSet<byte[]> qualifiers = scan.getFamilyMap().get(colFamily);
        byte[] qualifier = null;
        if (qualifiers != null && !qualifiers.isEmpty()) {
            qualifier = qualifiers.pollFirst();
        }
        List<Cell> results = new ArrayList<Cell>();
        boolean hasMoreRows = false;

        do {
            results.clear();
            hasMoreRows = scanner.next(results);
            for (Cell kv : results) {
                sumVal = ci.add(sumVal, ci.castToReturnType(ci.getValue(colFamily, qualifier, kv)));
            }
            rowCountVal++;
        } while (hasMoreRows);
        if (sumVal != null) {
            ByteString first = ci.getProtoForPromotedType(sumVal).toByteString();
            AggregateResponse.Builder pair = AggregateResponse.newBuilder();
            pair.addFirstPart(first);
            ByteBuffer bb = ByteBuffer.allocate(8).putLong(rowCountVal);
            bb.rewind();
            pair.setSecondPart(ByteString.copyFrom(bb));
            response = pair.build();
        }
    } catch (IOException e) {
        ResponseConverter.setControllerException(controller, e);
    } finally {
        if (scanner != null) {
            try {
                scanner.close();
            } catch (IOException ignored) {
            }
        }
    }
    done.run(response);
}

From source file:org.apache.hadoop.hbase.coprocessor.AggregateImplementation.java

/**
 * Gives a Pair with first object a List containing Sum and sum of squares,
 * and the second object as row count. It is computed for a given combination of
 * column qualifier and column family in the given row range as defined in the
 * Scan object. In its current implementation, it takes one column family and
 * one column qualifier (if provided). The idea is to get the value of variance first:
 * the average of the squares less the square of the average; the standard
 * deviation is the square root of the variance.
 */
@Override
public void getStd(RpcController controller, AggregateRequest request, RpcCallback<AggregateResponse> done) {
    InternalScanner scanner = null;
    AggregateResponse response = null;
    try {
        ColumnInterpreter<T, S, P, Q, R> ci = constructColumnInterpreterFromRequest(request);
        S sumVal = null, sumSqVal = null, tempVal = null;
        long rowCountVal = 0L;
        Scan scan = ProtobufUtil.toScan(request.getScan());
        scanner = env.getRegion().getScanner(scan);
        byte[] colFamily = scan.getFamilies()[0];
        NavigableSet<byte[]> qualifiers = scan.getFamilyMap().get(colFamily);
        byte[] qualifier = null;
        if (qualifiers != null && !qualifiers.isEmpty()) {
            qualifier = qualifiers.pollFirst();
        }
        List<Cell> results = new ArrayList<Cell>();

        boolean hasMoreRows = false;

        do {
            tempVal = null;
            hasMoreRows = scanner.next(results);
            for (Cell kv : results) {
                tempVal = ci.add(tempVal, ci.castToReturnType(ci.getValue(colFamily, qualifier, kv)));
            }
            results.clear();
            sumVal = ci.add(sumVal, tempVal);
            sumSqVal = ci.add(sumSqVal, ci.multiply(tempVal, tempVal));
            rowCountVal++;
        } while (hasMoreRows);
        if (sumVal != null) {
            ByteString first_sumVal = ci.getProtoForPromotedType(sumVal).toByteString();
            ByteString first_sumSqVal = ci.getProtoForPromotedType(sumSqVal).toByteString();
            AggregateResponse.Builder pair = AggregateResponse.newBuilder();
            pair.addFirstPart(first_sumVal);
            pair.addFirstPart(first_sumSqVal);
            ByteBuffer bb = ByteBuffer.allocate(8).putLong(rowCountVal);
            bb.rewind();
            pair.setSecondPart(ByteString.copyFrom(bb));
            response = pair.build();
        }
    } catch (IOException e) {
        ResponseConverter.setControllerException(controller, e);
    } finally {
        if (scanner != null) {
            try {
                scanner.close();
            } catch (IOException ignored) {
            }
        }
    }
    done.run(response);
}

From source file:voldemort.store.cachestore.impl.ChannelStore.java

/**
 * get keyOffset and len from index channel
 * @param record #
 * @return key Object
 */
public Key readKey(int record) throws IOException {
    ByteBuffer buf = ByteBuffer.allocate(RECORD_SIZE);
    indexChannel.read(buf, (long) record * RECORD_SIZE + OFFSET);
    buf.rewind();
    byte status = buf.get();
    if (isDeleted(status))
        return null;
    else {
        long key = buf.getLong();
        byte[] keys = readChannel(key, keyChannel);
        return toKey(keys);
    }
}

From source file:org.codehaus.preon.buffer.DefaultBitBuffer.java

public ByteBuffer readAsByteBuffer() {
    ByteBuffer buffer = byteBuffer.duplicate();
    buffer.rewind();
    return buffer;
}
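
Because duplicate() creates a view with its own position, limit and mark, rewinding the duplicate does not disturb the position of the underlying byteBuffer; callers always get a buffer that reads from the start.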

From source file:org.kalypso.grid.BinaryGeoGrid.java

private void saveStatistically() throws GeoGridException {
    final BigDecimal min = getMin();
    final BigDecimal max = getMax();

    try {
        m_unscaledMin = unscaleValue(min.doubleValue());
        m_unscaledMax = unscaleValue(max.doubleValue());

        /* directly write into buffer */
        final long pos = BinaryGeoGridHeader.HEADER_SIZE + getSizeX() * getSizeY() * 4;

        final ByteBuffer buffer = ByteBuffer.allocate(8);

        buffer.putInt(m_unscaledMin);
        buffer.putInt(m_unscaledMax);

        m_channel.position(pos);
        buffer.rewind();
        m_channel.write(buffer);
    } catch (final IOException e) {
        throw new GeoGridException("Failed to set statistical data", e);
    }
}

From source file:com.intel.chimera.stream.AbstractCryptoStreamTest.java

private void byteBufferReadCheck(InputStream in, ByteBuffer buf, int bufPos) throws Exception {
    buf.position(bufPos);
    int n = ((ReadableByteChannel) in).read(buf);
    Assert.assertEquals(bufPos + n, buf.position());
    byte[] readData = new byte[n];
    buf.rewind();
    buf.position(bufPos);
    buf.get(readData);
    byte[] expectedData = new byte[n];
    System.arraycopy(data, 0, expectedData, 0, n);
    Assert.assertArrayEquals(readData, expectedData);
}