Example usage for java.nio ByteBuffer rewind

Introduction

This page collects example usages of the java.nio.ByteBuffer rewind method, drawn from several open-source projects.

Prototype

public final Buffer rewind() 

Document

Rewinds this buffer. The position is set to zero and the mark is discarded.
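
The effect is easiest to see in a minimal, self-contained sketch: after writing into a buffer, rewind() moves the position back to zero (the limit is left untouched, unlike flip()), so the same bytes can be read back from the start.

import java.nio.ByteBuffer;

public class RewindDemo {
    public static void main(String[] args) {
        ByteBuffer buf = ByteBuffer.allocate(16);
        buf.putInt(42);
        buf.putInt(7);
        // position is now 8; rewind() resets it to 0 but,
        // unlike flip(), leaves the limit at the capacity (16)
        buf.rewind();
        System.out.println(buf.getInt()); // 42
        System.out.println(buf.getInt()); // 7
    }
}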

Usage

From source file:edu.harvard.iq.dvn.ingest.statdataio.impl.plugins.por.PORFileReader.java

private File decodeHeader(BufferedInputStream stream) throws IOException {
    File tempPORfile = null;

    if (stream == null) {
        throw new IllegalArgumentException("file == null!");
    }

    byte[] headerBytes = new byte[POR_HEADER_SIZE];

    if (stream.markSupported()) {
        stream.mark(1000);
    }
    int nbytes = stream.read(headerBytes, 0, POR_HEADER_SIZE);

    //printHexDump(headerBytes, "hex dump of the byte-array");

    if (nbytes == 0) {
        throw new IOException("decodeHeader: reading failure");
    } else if (nbytes < 491) {
        // Size test: by definition a POR file has at least a
        // 491-byte header, so any file smaller than this threshold
        // is not a POR file
        dbgLog.fine("this file is NOT spss-por type");
        throw new IllegalArgumentException("file is not spss-por type");
    }
    // rewind the current reading position back to the beginning
    if (stream.markSupported()) {
        stream.reset();
    }

    // line-terminating characters are usually one or two by definition;
    // however, a POR file saved by a genuine SPSS for Windows has been
    // seen with a three-character line terminator: the original file's
    // one-character terminator was not removed when the file was opened,
    // and the file was saved with the default two-character terminator
    // on top of it. So we have to expect such a rare case
    //
    // terminator
    // windows [0D0A]=>   [1310] = [CR/LF]
    // unix    [0A]  =>   [10]
    // mac     [0D]  =>   [13]
    // 3char  [0D0D0A]=> [131310] spss for windows rel 15
    //
    // terminating characters should be found at the following
    //                             column positions[counting from 0]:
    // unix    case: [0A]   : [80], [161], [242], [323], [404], [485]
    // windows case: [0D0A] : [81], [163], [245], [327], [409], [491]
    //           : [0D0D0A] : [82], [165], [248], [331], [414], [495]

    // convert the header bytes into a ByteBuffer

    ByteBuffer buff = ByteBuffer.wrap(headerBytes);
    byte[] nlch = new byte[36];
    int pos1;
    int pos2;
    int pos3;
    int ucase = 0;
    int wcase = 0;
    int mcase = 0;
    int three = 0;
    int nolines = 6;
    int nocols = 80;
    for (int i = 0; i < nolines; ++i) {
        int baseBias = nocols * (i + 1);
        // 1-char case
        pos1 = baseBias + i;
        buff.position(pos1);
        dbgLog.finer("\tposition(1)=" + buff.position());
        int j = 6 * i;
        nlch[j] = buff.get();

        if (nlch[j] == 10) {
            ucase++;
        } else if (nlch[j] == 13) {
            mcase++;
        }

        // 2-char case
        pos2 = baseBias + 2 * i;
        buff.position(pos2);
        dbgLog.finer("\tposition(2)=" + buff.position());

        nlch[j + 1] = buff.get();
        nlch[j + 2] = buff.get();

        // 3-char case
        pos3 = baseBias + 3 * i;
        buff.position(pos3);
        dbgLog.finer("\tposition(3)=" + buff.position());

        nlch[j + 3] = buff.get();
        nlch[j + 4] = buff.get();
        nlch[j + 5] = buff.get();

        dbgLog.finer(i + "-th iteration position =" + nlch[j] + "\t" + nlch[j + 1] + "\t" + nlch[j + 2]);
        dbgLog.finer(i + "-th iteration position =" + nlch[j + 3] + "\t" + nlch[j + 4] + "\t" + nlch[j + 5]);

        if ((nlch[j + 3] == 13) && (nlch[j + 4] == 13) && (nlch[j + 5] == 10)) {
            three++;
        } else if ((nlch[j + 1] == 13) && (nlch[j + 2] == 10)) {
            wcase++;
        }

        buff.rewind();
    }

    boolean windowsNewLine = true;
    if (three == nolines) {
        windowsNewLine = false; // lineTerminator = "0D0D0A"
    } else if ((ucase == nolines) && (wcase < nolines)) {
        windowsNewLine = false; // lineTerminator = "0A"
    } else if ((ucase < nolines) && (wcase == nolines)) {
        windowsNewLine = true; //lineTerminator = "0D0A"
    } else if ((mcase == nolines) && (wcase < nolines)) {
        windowsNewLine = false; //lineTerminator = "0D"
    }

    buff.rewind();
    int PORmarkPosition = POR_MARK_POSITION_DEFAULT;
    if (windowsNewLine) {
        PORmarkPosition = PORmarkPosition + 5;
    } else if (three == nolines) {
        PORmarkPosition = PORmarkPosition + 10;
    }

    byte[] pormark = new byte[8];
    buff.position(PORmarkPosition);
    buff.get(pormark, 0, 8);
    String pormarks = new String(pormark);

    //dbgLog.fine("pormark =>" + pormarks + "<-");
    dbgLog.fine(
            "pormark[hex: 53 50 53 53 50 4F 52 54 == SPSSPORT] =>" + new String(Hex.encodeHex(pormark)) + "<-");

    if (pormarks.equals(POR_MARK)) {
        dbgLog.fine("POR ID token test: Passed");
        init();

        smd.getFileInformation().put("mimeType", MIME_TYPE);
        smd.getFileInformation().put("fileFormat", MIME_TYPE);

    } else {
        dbgLog.fine("this file is NOT spss-por type");
        throw new IllegalArgumentException("decodeHeader: POR ID token was not found");
    }

    // save the POR file without new line characters

    FileOutputStream fileOutPOR = null;
    Writer fileWriter = null;

    // Scanner class can handle three-character line-terminator
    Scanner porScanner = null;

    try {
        tempPORfile = File.createTempFile("tempPORfile.", ".por");
        fileOutPOR = new FileOutputStream(tempPORfile);
        fileWriter = new BufferedWriter(new OutputStreamWriter(fileOutPOR, "utf8"));
        porScanner = new Scanner(stream);

        // Because 64-bit and 32-bit machines decode POR's first 40-byte
        // sequence differently, the first 5 leader lines are skipped from
        // the new-line-stripped file

        int lineCounter = 0;
        while (porScanner.hasNextLine()) {
            lineCounter++;
            if (lineCounter <= 5) {
                String line = porScanner.nextLine();
                dbgLog.fine("line=" + lineCounter + ":" + line.length() + ":" + line);
            } else {
                fileWriter.write(porScanner.nextLine());
            }
        }
    } finally {
        try {
            if (fileWriter != null) {
                fileWriter.close();
            }
        } catch (IOException ex) {
            ex.printStackTrace();
        }

        if (porScanner != null) {
            porScanner.close();
        }
    }

    return tempPORfile;
}
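
The probing above boils down to one pattern: wrap the header bytes, jump to fixed offsets with position(int), and rewind() between passes so each probe starts from a known state. A minimal sketch of that pattern, with hypothetical offsets rather than the real POR column positions:

import java.nio.ByteBuffer;

// Hypothetical helper: count line feeds found at fixed offsets.
static int countLineFeeds(byte[] header, int[] offsets) {
    ByteBuffer probe = ByteBuffer.wrap(header);
    int hits = 0;
    for (int offset : offsets) {
        probe.position(offset);  // absolute jump to a candidate column
        if (probe.get() == 0x0A) {
            hits++;
        }
        probe.rewind();          // back to 0, mirroring the loop above
    }
    return hits;
}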

From source file:com.healthmarketscience.jackcess.impl.TableImpl.java

/**
 * Writes a new table defined by the given TableCreator to the database.
 * @usage _advanced_method_
 */
protected static void writeTableDefinition(TableCreator creator) throws IOException {
    // first, create the usage map page
    createUsageMapDefinitionBuffer(creator);

    // next, determine how big the table def will be (in case it will be more
    // than one page)
    JetFormat format = creator.getFormat();
    int idxDataLen = (creator.getIndexCount() * (format.SIZE_INDEX_DEFINITION + format.SIZE_INDEX_COLUMN_BLOCK))
            + (creator.getLogicalIndexCount() * format.SIZE_INDEX_INFO_BLOCK);
    int colUmapLen = creator.getLongValueColumns().size() * 10;
    int totalTableDefSize = format.SIZE_TDEF_HEADER
            + (format.SIZE_COLUMN_DEF_BLOCK * creator.getColumns().size()) + idxDataLen + colUmapLen
            + format.SIZE_TDEF_TRAILER;

    // total up the amount of space used by the column and index names (2
    // bytes per char + 2 bytes for the length)
    for (ColumnBuilder col : creator.getColumns()) {
        int nameByteLen = (col.getName().length() * JetFormat.TEXT_FIELD_UNIT_SIZE);
        totalTableDefSize += nameByteLen + 2;
    }

    for (IndexBuilder idx : creator.getIndexes()) {
        int nameByteLen = (idx.getName().length() * JetFormat.TEXT_FIELD_UNIT_SIZE);
        totalTableDefSize += nameByteLen + 2;
    }

    // now, create the table definition
    PageChannel pageChannel = creator.getPageChannel();
    ByteBuffer buffer = PageChannel.createBuffer(Math.max(totalTableDefSize, format.PAGE_SIZE));
    writeTableDefinitionHeader(creator, buffer, totalTableDefSize);

    if (creator.hasIndexes()) {
        // index row counts
        IndexData.writeRowCountDefinitions(creator, buffer);
    }

    // column definitions
    ColumnImpl.writeDefinitions(creator, buffer);

    if (creator.hasIndexes()) {
        // index and index data definitions
        IndexData.writeDefinitions(creator, buffer);
        IndexImpl.writeDefinitions(creator, buffer);
    }

    // write long value column usage map references
    for (ColumnBuilder lvalCol : creator.getLongValueColumns()) {
        buffer.putShort(lvalCol.getColumnNumber());
        TableCreator.ColumnState colState = creator.getColumnState(lvalCol);

        // owned pages umap (both are on same page)
        buffer.put(colState.getUmapOwnedRowNumber());
        ByteUtil.put3ByteInt(buffer, colState.getUmapPageNumber());
        // free space pages umap
        buffer.put(colState.getUmapFreeRowNumber());
        ByteUtil.put3ByteInt(buffer, colState.getUmapPageNumber());
    }

    //End of tabledef
    buffer.put((byte) 0xff);
    buffer.put((byte) 0xff);

    // write table buffer to database
    if (totalTableDefSize <= format.PAGE_SIZE) {

        // easy case, fits on one page
        buffer.putShort(format.OFFSET_FREE_SPACE, (short) (buffer.remaining() - 8)); // overwrite page free space
        // Write the tdef page to disk.
        pageChannel.writePage(buffer, creator.getTdefPageNumber());

    } else {

        // need to split across multiple pages
        ByteBuffer partialTdef = pageChannel.createPageBuffer();
        buffer.rewind();
        int nextTdefPageNumber = PageChannel.INVALID_PAGE_NUMBER;
        while (buffer.hasRemaining()) {

            // reset for next write
            partialTdef.clear();

            if (nextTdefPageNumber == PageChannel.INVALID_PAGE_NUMBER) {

                // this is the first page.  note, the first page already has the
                // page header, so no need to write it here
                nextTdefPageNumber = creator.getTdefPageNumber();

            } else {

                // write page header
                writeTablePageHeader(partialTdef);
            }

            // copy the next page of tdef bytes
            int curTdefPageNumber = nextTdefPageNumber;
            int writeLen = Math.min(partialTdef.remaining(), buffer.remaining());
            partialTdef.put(buffer.array(), buffer.position(), writeLen);
            ByteUtil.forward(buffer, writeLen);

            if (buffer.hasRemaining()) {
                // need a next page
                nextTdefPageNumber = pageChannel.allocateNewPage();
                partialTdef.putInt(format.OFFSET_NEXT_TABLE_DEF_PAGE, nextTdefPageNumber);
            }

            // update page free space
            partialTdef.putShort(format.OFFSET_FREE_SPACE, (short) (partialTdef.remaining() - 8)); // overwrite page free space

            // write partial page to disk
            pageChannel.writePage(partialTdef, curTdefPageNumber);
        }

    }
}
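
The multi-page branch is a textbook use of rewind() before draining: the table definition has just been written into buffer, so the position sits at the end, and rewind() moves it back to zero so the hasRemaining() loop can copy the bytes out page by page. A stripped-down sketch of that chunking idiom, assuming a heap buffer with an accessible backing array (as here):

import java.nio.ByteBuffer;

// Hypothetical sketch: drain a freshly filled buffer in fixed-size chunks.
static void drainInPages(ByteBuffer filled, int pageSize) {
    filled.rewind(); // start from the first byte written
    ByteBuffer page = ByteBuffer.allocate(pageSize);
    while (filled.hasRemaining()) {
        page.clear(); // reset the page for the next chunk
        int len = Math.min(page.remaining(), filled.remaining());
        page.put(filled.array(), filled.position(), len);
        filled.position(filled.position() + len); // like ByteUtil.forward
        // ... hand `page` to whatever consumes a chunk ...
    }
}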

From source file:org.apache.bookkeeper.bookie.Bookie.java

void readJournal() throws IOException, BookieException {
    long startTs = MathUtils.now();
    journal.replay(new JournalScanner() {
        @Override
        public void process(int journalVersion, long offset, ByteBuffer recBuff) throws IOException {
            long ledgerId = recBuff.getLong();
            long entryId = recBuff.getLong();
            try {
                LOG.debug("Replay journal - ledger id : {}, entry id : {}.", ledgerId, entryId);
                if (entryId == METAENTRY_ID_LEDGER_KEY) {
                    if (journalVersion >= JournalChannel.V3) {
                        int masterKeyLen = recBuff.getInt();
                        byte[] masterKey = new byte[masterKeyLen];

                        recBuff.get(masterKey);
                        masterKeyCache.put(ledgerId, masterKey);
                    } else {
                        throw new IOException("Invalid journal. Contains journalKey but layout version ("
                                + journalVersion + ") is too old to hold this");
                    }
                } else if (entryId == METAENTRY_ID_FENCE_KEY) {
                    if (journalVersion >= JournalChannel.V4) {
                        byte[] key = masterKeyCache.get(ledgerId);
                        if (key == null) {
                            key = ledgerStorage.readMasterKey(ledgerId);
                        }
                        LedgerDescriptor handle = handles.getHandle(ledgerId, key);
                        handle.setFenced();
                    } else {
                        throw new IOException("Invalid journal. Contains fenceKey but layout version ("
                                + journalVersion + ") is too old to hold this");
                    }
                } else {
                    byte[] key = masterKeyCache.get(ledgerId);
                    if (key == null) {
                        key = ledgerStorage.readMasterKey(ledgerId);
                    }
                    LedgerDescriptor handle = handles.getHandle(ledgerId, key);

                    recBuff.rewind();
                    handle.addEntry(recBuff);
                }
            } catch (NoLedgerException nsle) {
                LOG.debug("Skip replaying entries of ledger {} since it was deleted.", ledgerId);
            } catch (BookieException be) {
                throw new IOException(be);
            }
        }
    });
    long elapsedTs = MathUtils.now() - startTs;
    LOG.info("Finished replaying journal in {} ms.", elapsedTs);
}
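
In this scanner the two getLong() calls consume the 16-byte record header to decide how to handle the entry; for ordinary entries, recBuff.rewind() puts the position back to zero so addEntry() receives the complete record, ids included. The peek-then-hand-off idiom in isolation (the names and the entryId test below are hypothetical stand-ins):

import java.nio.ByteBuffer;

// Hypothetical: peek at a record header, then pass the whole record on.
static void dispatch(ByteBuffer record) {
    long ledgerId = record.getLong(); // consumes 8 bytes
    long entryId = record.getLong();  // consumes 8 more
    if (entryId >= 0) {               // stand-in for the "normal entry" check
        record.rewind();              // downstream re-reads from byte 0
        store(ledgerId, record);
    }
}

static void store(long ledgerId, ByteBuffer record) {
    // persist the record...
}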

From source file:edu.harvard.iq.dataverse.ingest.tabulardata.impl.plugins.por.PORFileReader.java

private File decodeHeader(BufferedInputStream stream) throws IOException {
    dbgLog.fine("decodeHeader(): start");
    File tempPORfile = null;

    if (stream == null) {
        throw new IllegalArgumentException("file == null!");
    }

    byte[] headerBytes = new byte[POR_HEADER_SIZE];

    if (stream.markSupported()) {
        stream.mark(1000);
    }
    int nbytes = stream.read(headerBytes, 0, POR_HEADER_SIZE);

    //printHexDump(headerBytes, "hex dump of the byte-array");

    if (nbytes == 0) {
        throw new IOException("decodeHeader: reading failure");
    } else if (nbytes < 491) {
        // Size test: by definition a POR file has at least a
        // 491-byte header, so any file smaller than this threshold
        // is not a POR file
        dbgLog.fine("this file is NOT spss-por type");
        throw new IllegalArgumentException("file is not spss-por type");
    }
    // rewind the current reading position back to the beginning
    if (stream.markSupported()) {
        stream.reset();
    }

    // line-terminating characters are usually one or two by definition;
    // however, a POR file saved by a genuine SPSS for Windows has been
    // seen with a three-character line terminator: the original file's
    // one-character terminator was not removed when the file was opened,
    // and the file was saved with the default two-character terminator
    // on top of it. So we have to expect such a rare case
    //
    // terminator
    // windows [0D0A]=>   [1310] = [CR/LF]
    // unix    [0A]  =>   [10]
    // mac     [0D]  =>   [13]
    // 3char  [0D0D0A]=> [131310] spss for windows rel 15
    //
    // terminating characters should be found at the following
    //                             column positions[counting from 0]:
    // unix    case: [0A]   : [80], [161], [242], [323], [404], [485]
    // windows case: [0D0A] : [81], [163], [245], [327], [409], [491]
    //           : [0D0D0A] : [82], [165], [248], [331], [414], [495]

    // convert the header bytes into a ByteBuffer

    ByteBuffer buff = ByteBuffer.wrap(headerBytes);
    byte[] nlch = new byte[36];
    int pos1;
    int pos2;
    int pos3;
    int ucase = 0;
    int wcase = 0;
    int mcase = 0;
    int three = 0;
    int nolines = 6;
    int nocols = 80;
    for (int i = 0; i < nolines; ++i) {
        int baseBias = nocols * (i + 1);
        // 1-char case
        pos1 = baseBias + i;
        buff.position(pos1);
        dbgLog.finer("\tposition(1)=" + buff.position());
        int j = 6 * i;
        nlch[j] = buff.get();

        if (nlch[j] == 10) {
            ucase++;
        } else if (nlch[j] == 13) {
            mcase++;
        }

        // 2-char case
        pos2 = baseBias + 2 * i;
        buff.position(pos2);
        dbgLog.finer("\tposition(2)=" + buff.position());

        nlch[j + 1] = buff.get();
        nlch[j + 2] = buff.get();

        // 3-char case
        pos3 = baseBias + 3 * i;
        buff.position(pos3);
        dbgLog.finer("\tposition(3)=" + buff.position());

        nlch[j + 3] = buff.get();
        nlch[j + 4] = buff.get();
        nlch[j + 5] = buff.get();

        dbgLog.finer(i + "-th iteration position =" + nlch[j] + "\t" + nlch[j + 1] + "\t" + nlch[j + 2]);
        dbgLog.finer(i + "-th iteration position =" + nlch[j + 3] + "\t" + nlch[j + 4] + "\t" + nlch[j + 5]);

        if ((nlch[j + 3] == 13) && (nlch[j + 4] == 13) && (nlch[j + 5] == 10)) {
            three++;
        } else if ((nlch[j + 1] == 13) && (nlch[j + 2] == 10)) {
            wcase++;
        }

        buff.rewind();
    }

    boolean windowsNewLine = true;
    if (three == nolines) {
        windowsNewLine = false; // lineTerminator = "0D0D0A"
    } else if ((ucase == nolines) && (wcase < nolines)) {
        windowsNewLine = false; // lineTerminator = "0A"
    } else if ((ucase < nolines) && (wcase == nolines)) {
        windowsNewLine = true; //lineTerminator = "0D0A"
    } else if ((mcase == nolines) && (wcase < nolines)) {
        windowsNewLine = false; //lineTerminator = "0D"
    }

    buff.rewind();
    int PORmarkPosition = POR_MARK_POSITION_DEFAULT;
    if (windowsNewLine) {
        PORmarkPosition = PORmarkPosition + 5;
    } else if (three == nolines) {
        PORmarkPosition = PORmarkPosition + 10;
    }

    byte[] pormark = new byte[8];
    buff.position(PORmarkPosition);
    buff.get(pormark, 0, 8);
    String pormarks = new String(pormark);

    //dbgLog.fine("pormark =>" + pormarks + "<-");
    dbgLog.fine(
            "pormark[hex: 53 50 53 53 50 4F 52 54 == SPSSPORT] =>" + new String(Hex.encodeHex(pormark)) + "<-");

    if (pormarks.equals(POR_MARK)) {
        dbgLog.fine("POR ID token test: Passed");
        init();

        dataTable.setOriginalFileFormat(MIME_TYPE);
        dataTable.setUnf("UNF:6:NOTCALCULATED");

    } else {
        dbgLog.fine("this file is NOT spss-por type");
        throw new IllegalArgumentException("decodeHeader: POR ID token was not found");
    }

    // save the POR file without new line characters

    FileOutputStream fileOutPOR = null;
    Writer fileWriter = null;

    // Scanner class can handle three-character line-terminator
    Scanner porScanner = null;

    try {
        tempPORfile = File.createTempFile("tempPORfile.", ".por");
        fileOutPOR = new FileOutputStream(tempPORfile);
        fileWriter = new BufferedWriter(new OutputStreamWriter(fileOutPOR, "utf8"));
        porScanner = new Scanner(stream);

        // Because 64-bit and 32-bit machines decode POR's first 40-byte
        // sequence differently, the first 5 leader lines are skipped from
        // the new-line-stripped file

        int lineCounter = 0;
        while (porScanner.hasNextLine()) {
            lineCounter++;
            if (lineCounter <= 5) {
                String line = porScanner.nextLine();
                dbgLog.fine("line=" + lineCounter + ":" + line.length() + ":" + line);
            } else {
                fileWriter.write(porScanner.nextLine());
            }
        }
    } finally {
        try {
            if (fileWriter != null) {
                fileWriter.close();
            }
        } catch (IOException ex) {
            ex.printStackTrace();
        }

        if (porScanner != null) {
            porScanner.close();
        }
    }

    return tempPORfile;
}

From source file:jext2.DataInode.java

/**
 * Read Inode data
 * @param  size        size of the data to be read
 * @param  fileOffset  start address in the data area
 * @return a buffer of the requested size containing the data
 * @throws FileTooLarge
 * @throws IoError
 */
public ByteBuffer readData(int size, long fileOffset) throws JExt2Exception, FileTooLarge {
    /* Returning null may break things somewhere, and a zero-length
     * buffer breaks something in jlowfuse's C code */
    if (getSize() == 0)
        return ByteBuffer.allocateDirect(1);

    /*
     * size may be larger than inode.size; it doesn't make sense to
     * return 4k of zeros
     */
    if (size > getSize())
        size = (int) getSize();

    ByteBuffer buf = ByteBuffer.allocateDirect(size);

    int blocksize = superblock.getBlocksize();

    long i = 0;
    long firstBlock = fileOffset / blocksize;
    long offset = fileOffset % blocksize;

    /*
     * just as size may be larger than the inode's data, the number of blocks
     * may also be.
     */
    long approxBlocks = (size / blocksize) + 1;
    long maxBlocks = this.getBlocks() / (superblock.getBlocksize() / 512);
    if (approxBlocks > maxBlocks)
        approxBlocks = maxBlocks;

    while (i < approxBlocks) {
        long start = firstBlock + i;
        long stop = firstBlock + approxBlocks;

        LinkedList<Long> b = accessData().getBlocks(start, stop);
        int blocksRead;

        /*
         * Note on the sparse file support:
         * getBlocks will return null if there is no data block for this
         * logical address. So just move the position count blocks forward.
         */

        if (b == null) { /* hole */
            blocksRead = 1;

            int unboundedLimit = buf.position() + blocksize;
            int limit = Math.min(unboundedLimit, buf.capacity());

            assert limit <= buf.capacity() : "New position, limit " + limit + " is beyond buffer's capacity, "
                    + buf;

            buf.limit(limit);
            buf.position(limit);

            assert buf.limit() == buf.position();

        } else { /* blocks */
            blocksRead = b.size();

            long pos = b.getFirst() * blocksize + offset;
            int unboundedLimit = buf.position() + blocksRead * blocksize;
            int limit = Math.min(unboundedLimit, buf.capacity());

            assert limit <= buf.capacity() : "New limit " + limit + " is beyond buffer's capacity, " + buf;

            buf.limit(limit);
            blockAccess.readToBufferUnsynchronized(pos, buf);
        }

        i += blocksRead;
        offset = 0;

        /* This should be removed soon. IllegalMonitorStateExceptions
         * happen occasionally for unknown reasons.
         */
        try {
            accessData().getHierarchyLock().readLock().unlock();
        } catch (IllegalMonitorStateException e) {
            Logger log = Filesystem.getLogger();
            log.warning("IllegalMonitorStateException encountered in readData, inode=" + this);
            log.warning(String.format(
                    "context for exception: blocks=%s i=%d approxBlocks=%d off=%d buf=%s readlock=%s lock.readlock.holds=%s",
                    b, i, approxBlocks, fileOffset, buf, accessData().getHierarchyLock(),
                    accessData().getHierarchyLock().getReadHoldCount()));
        }

        if (buf.capacity() == buf.limit())
            break;
    }

    assert buf.position() == buf.limit() : "Buffer wasn't filled completely";
    assert buf.limit() == size : "Read buffer size does not match request size";

    if (buf.limit() > getSize())
        buf.limit((int) getSize());

    buf.rewind();
    return buf;
}
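
The final limit()/rewind() pair is the method's return convention: the limit is trimmed to the actual file size and the position is reset, so the caller's first relative get() reads the first byte of data. The same convention in a generic form (a sketch, not jext2 API):

import java.nio.ByteBuffer;

// Return a buffer trimmed to what was written and rewound for the caller.
static ByteBuffer snapshot(byte[] src, int size) {
    ByteBuffer buf = ByteBuffer.allocate(size);
    buf.put(src, 0, Math.min(size, src.length));
    buf.limit(buf.position()); // trim to the bytes actually written
    buf.rewind();              // callers read from position 0
    return buf;                // limit()+rewind() here is equivalent to flip()
}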

From source file:com.amazonaws.services.kinesis.clientlibrary.types.UserRecord.java

/**
 * This method deaggregates the given list of Amazon Kinesis records into a
 * list of KPL user records. Any KPL user records whose explicit hash key or
 * partition key falls outside the range of the startingHashKey and the
 * endingHashKey are discarded from the resulting list. This method will
 * then return the resulting list of KPL user records.
 *
 * @param records
 *            A list of Amazon Kinesis records, each possibly aggregated.
 * @param startingHashKey
 *            A BigInteger representing the starting hash key that the
 *            explicit hash keys or partition keys of retained resulting KPL
 *            user records must be greater than or equal to.
 * @param endingHashKey
 *            A BigInteger representing the ending hash key that the
 *            explicit hash keys or partition keys of retained resulting KPL
 *            user records must be smaller than or equal to.
 * @return A resulting list of KPL user records whose explicit hash keys or
 *          partition keys fall within the range of the startingHashKey and
 *          the endingHashKey.
 */
// CHECKSTYLE:OFF NPathComplexity
public static List<UserRecord> deaggregate(List<Record> records, BigInteger startingHashKey,
        BigInteger endingHashKey) {
    List<UserRecord> result = new ArrayList<>();
    byte[] magic = new byte[AGGREGATED_RECORD_MAGIC.length];
    byte[] digest = new byte[DIGEST_SIZE];

    for (Record r : records) {
        boolean isAggregated = true;
        long subSeqNum = 0;
        ByteBuffer bb = r.getData();

        if (bb.remaining() >= magic.length) {
            bb.get(magic);
        } else {
            isAggregated = false;
        }

        if (!Arrays.equals(AGGREGATED_RECORD_MAGIC, magic) || bb.remaining() <= DIGEST_SIZE) {
            isAggregated = false;
        }

        if (isAggregated) {
            int oldLimit = bb.limit();
            bb.limit(oldLimit - DIGEST_SIZE);
            byte[] messageData = new byte[bb.remaining()];
            bb.get(messageData);
            bb.limit(oldLimit);
            bb.get(digest);
            byte[] calculatedDigest = md5(messageData);

            if (!Arrays.equals(digest, calculatedDigest)) {
                isAggregated = false;
            } else {
                try {
                    Messages.AggregatedRecord ar = Messages.AggregatedRecord.parseFrom(messageData);
                    List<String> pks = ar.getPartitionKeyTableList();
                    List<String> ehks = ar.getExplicitHashKeyTableList();
                    long aat = r.getApproximateArrivalTimestamp() == null ? -1
                            : r.getApproximateArrivalTimestamp().getTime();
                    try {
                        int recordsInCurrRecord = 0;
                        for (Messages.Record mr : ar.getRecordsList()) {
                            String explicitHashKey = null;
                            String partitionKey = pks.get((int) mr.getPartitionKeyIndex());
                            if (mr.hasExplicitHashKeyIndex()) {
                                explicitHashKey = ehks.get((int) mr.getExplicitHashKeyIndex());
                            }

                            BigInteger effectiveHashKey = explicitHashKey != null
                                    ? new BigInteger(explicitHashKey)
                                    : new BigInteger(1, md5(partitionKey.getBytes("UTF-8")));

                            if (effectiveHashKey.compareTo(startingHashKey) < 0
                                    || effectiveHashKey.compareTo(endingHashKey) > 0) {
                                for (int toRemove = 0; toRemove < recordsInCurrRecord; ++toRemove) {
                                    result.remove(result.size() - 1);
                                }
                                break;
                            }

                            ++recordsInCurrRecord;
                            Record record = new Record().withData(ByteBuffer.wrap(mr.getData().toByteArray()))
                                    .withPartitionKey(partitionKey).withSequenceNumber(r.getSequenceNumber())
                                    .withApproximateArrivalTimestamp(aat < 0 ? null : new Date(aat));
                            result.add(new UserRecord(true, record, subSeqNum++, explicitHashKey));
                        }
                    } catch (Exception e) {
                        StringBuilder sb = new StringBuilder();
                        sb.append("Unexpected exception during deaggregation, record was:\n");
                        sb.append("PKS:\n");
                        for (String s : pks) {
                            sb.append(s).append("\n");
                        }
                        sb.append("EHKS: \n");
                        for (String s : ehks) {
                            sb.append(s).append("\n");
                        }
                        for (Messages.Record mr : ar.getRecordsList()) {
                            sb.append("Record: [hasEhk=").append(mr.hasExplicitHashKeyIndex()).append(", ")
                                    .append("ehkIdx=").append(mr.getExplicitHashKeyIndex()).append(", ")
                                    .append("pkIdx=").append(mr.getPartitionKeyIndex()).append(", ")
                                    .append("dataLen=").append(mr.getData().toByteArray().length).append("]\n");
                        }
                        sb.append("Sequence number: ").append(r.getSequenceNumber()).append("\n")
                                .append("Raw data: ")
                                .append(javax.xml.bind.DatatypeConverter.printBase64Binary(messageData))
                                .append("\n");
                        LOG.error(sb.toString(), e);
                    }
                } catch (InvalidProtocolBufferException e) {
                    isAggregated = false;
                }
            }
        }

        if (!isAggregated) {
            bb.rewind();
            result.add(new UserRecord(r));
        }
    }
    return result;
}
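
The bb.rewind() on the fallback path matters because the magic-number and digest reads have already advanced the buffer; rewinding restores the untouched payload before it is wrapped in a plain UserRecord. The probe-then-restore idiom on its own (a sketch with hypothetical names):

import java.nio.ByteBuffer;
import java.util.Arrays;

// Hypothetical: check a magic prefix; on mismatch, restore the buffer.
static boolean hasMagic(ByteBuffer bb, byte[] expected) {
    if (bb.remaining() < expected.length) {
        return false;                   // nothing consumed, nothing to restore
    }
    byte[] head = new byte[expected.length];
    bb.get(head);                       // advances the position
    if (Arrays.equals(head, expected)) {
        return true;                    // caller continues after the magic
    }
    bb.rewind();                        // give the consumed bytes back
    return false;
}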

From source file:edu.brown.hstore.HStoreSite.java

/**
 * Send the transaction request to another node for execution. We will create
 * a TransactionRedirectCallback that will automatically send the ClientResponse
 * generated from the remote node for this txn back to the client 
 * @param catalog_proc
 * @param serializedRequest
 * @param base_partition
 * @param clientCallback
 */
public void transactionRedirect(Procedure catalog_proc, ByteBuffer serializedRequest, int base_partition,
        RpcCallback<ClientResponseImpl> clientCallback) {
    if (debug.val)
        LOG.debug(
                String.format("Forwarding %s request to partition %d [clientHandle=%d]", catalog_proc.getName(),
                        base_partition, StoredProcedureInvocation.getClientHandle(serializedRequest)));

    // Make a wrapper for the original callback so that when the result comes back from the remote partition
    // we will just forward it back to the client. How sweet is that??
    RedirectCallback callback = null;
    try {
        callback = new RedirectCallback(this);
        // callback = (RedirectCallback)objectPools.CALLBACKS_TXN_REDIRECT_REQUEST.borrowObject();
        callback.init(clientCallback);
    } catch (Exception ex) {
        throw new RuntimeException("Failed to get TransactionRedirectCallback", ex);
    }

    // Mark this request as having been redirected
    // XXX: This sucks because we have to copy the bytes, which will then
    // get copied again when we have to serialize it out to a ByteString
    serializedRequest.rewind();
    ByteBuffer copy = ByteBuffer.allocate(serializedRequest.capacity());
    copy.put(serializedRequest);
    StoredProcedureInvocation.setBasePartition(base_partition, copy);

    this.hstore_coordinator.transactionRedirect(copy.array(), callback, base_partition);
    if (hstore_conf.site.txn_counters)
        TransactionCounter.REDIRECTED.inc(catalog_proc);
}
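
Here rewind() guarantees the copy starts at byte zero no matter how much of serializedRequest was consumed upstream; allocate(capacity()) plus put() then duplicates the whole request. As a standalone sketch (note that, as in the code above, the source buffer's position is clobbered):

import java.nio.ByteBuffer;

// Copy a buffer's readable contents from the beginning.
static ByteBuffer fullCopy(ByteBuffer src) {
    src.rewind(); // side effect: resets the caller's position
    ByteBuffer copy = ByteBuffer.allocate(src.capacity());
    copy.put(src);     // copies from position 0 up to src's limit
    copy.rewind();     // leave the copy ready to read
    return copy;
}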

From source file:org.apache.hadoop.hbase.coprocessor.transactional.SsccRegionEndpoint.java

/**
 * Gives a Pair with the sum as its first object and the row count as its
 * second object, computed for a given combination of column qualifier and
 * column family in the row range defined by the Scan object. In its
 * current implementation, it takes one column family and one column
 * qualifier (if provided). If the column qualifier is null, an aggregate
 * sum over the entire column family is returned.
 * <p>
 * The average is computed in
 * AggregationClient#avg(byte[], ColumnInterpreter, Scan) by
 * processing results from all regions, so it's "ok" to pass the sum and a
 * Long type.
 */
@Override
public void getAvg(RpcController controller, SsccTransactionalAggregateRequest request,
        RpcCallback<SsccTransactionalAggregateResponse> done) {
    if (LOG.isTraceEnabled())
        LOG.trace("SsccRegionEndpoint coprocessor: getAvg entry");
    SsccTransactionalAggregateResponse response = null;
    RegionScanner scanner = null;
    try {
        ColumnInterpreter<T, S, P, Q, R> ci = constructColumnInterpreterFromRequest(request);
        S sumVal = null;
        Long rowCountVal = 0L;
        Scan scan = ProtobufUtil.toScan(request.getScan());
        long transactionId = request.getTransactionId();
        long startId = request.getStartId();
        scanner = getScanner(transactionId, startId, scan);
        byte[] colFamily = scan.getFamilies()[0];
        NavigableSet<byte[]> qualifiers = scan.getFamilyMap().get(colFamily);
        byte[] qualifier = null;
        if (qualifiers != null && !qualifiers.isEmpty()) {
            qualifier = qualifiers.pollFirst();
        }
        List<Cell> results = new ArrayList<Cell>();
        boolean hasMoreRows = false;

        do {
            results.clear();
            hasMoreRows = scanner.next(results);
            for (Cell kv : results) {
                sumVal = ci.add(sumVal, ci.castToReturnType(ci.getValue(colFamily, qualifier, kv)));
            }
            rowCountVal++;
        } while (hasMoreRows);
        if (sumVal != null) {
            ByteString first = ci.getProtoForPromotedType(sumVal).toByteString();
            SsccTransactionalAggregateResponse.Builder pair = SsccTransactionalAggregateResponse.newBuilder();
            pair.addFirstPart(first);
            ByteBuffer bb = ByteBuffer.allocate(8).putLong(rowCountVal);
            bb.rewind();
            pair.setSecondPart(ByteString.copyFrom(bb));
            response = pair.build();
        }
    } catch (IOException e) {
        ResponseConverter.setControllerException(controller, e);
    } finally {
        if (scanner != null) {
            try {
                scanner.close();
            } catch (IOException ignored) {
            }
        }
    }
    done.run(response);
}
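
The allocate(8).putLong(...) / rewind() pair is a stock idiom for turning a long into an 8-byte payload: putLong() leaves the position at 8, and rewind() resets it so a consumer such as ByteString.copyFrom(bb), which reads the remaining bytes, sees all eight. The same idiom without the protobuf types:

import java.nio.ByteBuffer;

// Encode a long as its 8 (big-endian) bytes.
static byte[] longToBytes(long value) {
    ByteBuffer bb = ByteBuffer.allocate(8).putLong(value);
    bb.rewind();              // position back to 0 so all 8 bytes are readable
    byte[] out = new byte[8];
    bb.get(out);
    return out;
}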

From source file:org.apache.hadoop.hbase.coprocessor.transactional.SsccRegionEndpoint.java

/**
 * Gives a Pair whose first object is a List containing the sum and the sum
 * of squares, and whose second object is the row count. It is computed for
 * a given combination of column qualifier and column family in the row
 * range defined by the Scan object. In its current implementation, it
 * takes one column family and one column qualifier (if provided). The idea
 * is to get the variance first (the average of the squares less the square
 * of the average); the standard deviation is the square root of the
 * variance.
 */
@Override
public void getStd(RpcController controller, SsccTransactionalAggregateRequest request,
        RpcCallback<SsccTransactionalAggregateResponse> done) {
    if (LOG.isTraceEnabled())
        LOG.trace("SsccRegionEndpoint coprocessor: getStd entry");
    RegionScanner scanner = null;
    SsccTransactionalAggregateResponse response = null;
    try {
        ColumnInterpreter<T, S, P, Q, R> ci = constructColumnInterpreterFromRequest(request);
        S sumVal = null, sumSqVal = null, tempVal = null;
        long rowCountVal = 0L;
        Scan scan = ProtobufUtil.toScan(request.getScan());
        long transactionId = request.getTransactionId();
        long startId = request.getStartId();
        scanner = getScanner(transactionId, startId, scan);
        byte[] colFamily = scan.getFamilies()[0];
        NavigableSet<byte[]> qualifiers = scan.getFamilyMap().get(colFamily);
        byte[] qualifier = null;
        if (qualifiers != null && !qualifiers.isEmpty()) {
            qualifier = qualifiers.pollFirst();
        }
        List<Cell> results = new ArrayList<Cell>();

        boolean hasMoreRows = false;

        do {
            tempVal = null;
            hasMoreRows = scanner.next(results);
            for (Cell kv : results) {
                tempVal = ci.add(tempVal, ci.castToReturnType(ci.getValue(colFamily, qualifier, kv)));
            }
            results.clear();
            sumVal = ci.add(sumVal, tempVal);
            sumSqVal = ci.add(sumSqVal, ci.multiply(tempVal, tempVal));
            rowCountVal++;
        } while (hasMoreRows);
        if (sumVal != null) {
            ByteString first_sumVal = ci.getProtoForPromotedType(sumVal).toByteString();
            ByteString first_sumSqVal = ci.getProtoForPromotedType(sumSqVal).toByteString();
            SsccTransactionalAggregateResponse.Builder pair = SsccTransactionalAggregateResponse.newBuilder();
            pair.addFirstPart(first_sumVal);
            pair.addFirstPart(first_sumSqVal);
            ByteBuffer bb = ByteBuffer.allocate(8).putLong(rowCountVal);
            bb.rewind();
            pair.setSecondPart(ByteString.copyFrom(bb));
            response = pair.build();
        }
    } catch (IOException e) {
        ResponseConverter.setControllerException(controller, e);
    } finally {
        if (scanner != null) {
            try {
                scanner.close();
            } catch (IOException ignored) {
            }
        }
    }
    done.run(response);
}

From source file:org.apache.hadoop.hbase.coprocessor.transactional.SsccRegionEndpoint.java

/**
 * Gives the row count for the given column family and column qualifier, in
 * the given row range as defined in the Scan object.
 * @throws IOException
 */
@Override
public void getRowNum(RpcController controller, SsccTransactionalAggregateRequest request,
        RpcCallback<SsccTransactionalAggregateResponse> done) {
    if (LOG.isTraceEnabled())
        LOG.trace("SsccRegionEndpoint coprocessor: getRowNum entry");
    SsccTransactionalAggregateResponse response = null;
    long counter = 0L;
    List<Cell> results = new ArrayList<Cell>();
    RegionScanner scanner = null;
    long transactionId = 0L;
    try {
        Scan scan = ProtobufUtil.toScan(request.getScan());
        byte[][] colFamilies = scan.getFamilies();
        byte[] colFamily = colFamilies != null ? colFamilies[0] : null;
        NavigableSet<byte[]> qualifiers = colFamilies != null ? scan.getFamilyMap().get(colFamily) : null;
        byte[] qualifier = null;
        if (qualifiers != null && !qualifiers.isEmpty()) {
            qualifier = qualifiers.pollFirst();
        }
        if (scan.getFilter() == null && qualifier == null)
            scan.setFilter(new FirstKeyOnlyFilter());
        transactionId = request.getTransactionId();
        long startId = request.getStartId();
        scanner = getScanner(transactionId, startId, scan);
        SsccTransactionState state = this.beginTransIfNotExist(transactionId, startId);
        boolean hasMoreRows = false;
        boolean firstCell;
        do {
            hasMoreRows = scanner.next(results);
            firstCell = true;
            Result verResult = null;
            Result statusResult = null;
            Result colResult = null;
            for (Cell c : results) {
                if (firstCell) {
                    // compare the family bytes by content, not by reference
                    if (!Bytes.equals(CellUtil.cloneFamily(c), DtmConst.TRANSACTION_META_FAMILY)) {
                        //get the statusList
                        Get statusGet = new Get(c.getRow()); //TODO: deprecated API
                        if (LOG.isTraceEnabled())
                            LOG.trace("SsccRegionEndpoint coprocessor: getRowNum first row:  " + c.getRow());
                        //statusGet.setTimeStamp(startId);
                        statusGet.addColumn(DtmConst.TRANSACTION_META_FAMILY, SsccConst.STATUS_COL);
                        statusGet.setMaxVersions(DtmConst.MAX_VERSION);
                        statusResult = m_Region.get(statusGet);

                        //get the colList
                        Get colGet = new Get(c.getRow()); //TODO: deprecated API
                        //colGet.setTimeStamp(startId);
                        colGet.addColumn(DtmConst.TRANSACTION_META_FAMILY, SsccConst.COLUMNS_COL);
                        colGet.setMaxVersions(DtmConst.MAX_VERSION);
                        colResult = m_Region.get(colGet);

                        //get the versionList
                        Get verGet = new Get(c.getRow());//TODO: deprecated API
                        //verGet.setTimeStamp(startId);
                        verGet.addColumn(DtmConst.TRANSACTION_META_FAMILY, SsccConst.VERSION_COL);
                        verGet.setMaxVersions(DtmConst.MAX_VERSION);
                        verResult = m_Region.get(verGet);
                        firstCell = false;
                    }

                    if (!firstCell) {
                        if (state.handleResult(c, statusResult.listCells(), verResult.listCells(),
                                colResult.listCells(), transactionId)) {
                            if (LOG.isTraceEnabled())
                                LOG.trace(
                                        "SsccRegionEndpoint coprocessor: getRowNum adding cell: " + c.getRow());
                            counter++;
                            break;
                        }
                    }
                }
            }
            results.clear();
        } while (hasMoreRows);
        ByteBuffer bb = ByteBuffer.allocate(8).putLong(counter);
        bb.rewind();
        response = SsccTransactionalAggregateResponse.newBuilder().addFirstPart(ByteString.copyFrom(bb))
                .build();
    } catch (IOException e) {
        ResponseConverter.setControllerException(controller, e);
    } finally {
        if (scanner != null) {
            try {
                scanner.close();
            } catch (IOException ignored) {
            }
        }
    }
    if (LOG.isTraceEnabled())
        LOG.trace("Row counter for transactionId " + transactionId + " from this region: "
                + env.getRegion().getRegionNameAsString() + " is " + counter);
    done.run(response);
}