Example usage for java.nio ByteBuffer putInt

List of usage examples for java.nio ByteBuffer putInt

Introduction

On this page you can find example usage for java.nio ByteBuffer putInt.

Prototype

public abstract ByteBuffer putInt(int value);

Source Link

Document

Writes the given int to the current position and increases the position by 4.

Usage

From source file:org.alfresco.repo.search.impl.lucene.index.IndexInfo.java

/**
 * Serializes a string into the status buffer as a 4-byte length followed by one
 * byte per character, and folds the encoded bytes into the running CRC.
 *
 * Only strings whose characters all fit in a single byte (value <= 0xFF) are
 * supported; anything wider is rejected rather than silently mangled.
 *
 * @param buffer destination buffer, written at its current position
 * @param crc32  running checksum updated with the encoded bytes (not the length)
 * @param string value to serialize
 * @throws UnsupportedEncodingException if any character is greater than 0xFF
 */
private void writeString(ByteBuffer buffer, CRC32 crc32, String string) throws UnsupportedEncodingException {
    final int length = string.length();
    byte[] encoded = new byte[length];
    for (int index = 0; index < length; index++) {
        char c = string.charAt(index);
        if (c > 0xFF) {
            // Character cannot be represented in one byte; refuse to serialize.
            throw new UnsupportedEncodingException();
        }
        encoded[index] = (byte) c;
    }
    buffer.putInt(encoded.length);
    buffer.put(encoded);
    crc32.update(encoded);
}

From source file:org.alfresco.repo.search.impl.lucene.index.IndexInfo.java

/**
 * Writes a versioned, CRC-protected snapshot of {@code indexEntries} to the given
 * channel, either through a memory-mapped buffer or an explicit heap-buffer write
 * depending on {@code useNIOMemoryMapping}.
 *
 * Record layout (as visible here): {@code <version:long>} {@code <entryCount:int>}
 * then per entry: type, name, parentName, status, mergeId (each length-prefixed
 * via writeString), documentCount (long), deletions (long), deletOnlyNodes flag
 * (one byte) — followed by the CRC32 value as a long.
 *
 * NOTE(review): the matching reader is not visible in this chunk; the field order
 * and CRC computation below presumably mirror it exactly — confirm before changing.
 *
 * @param channel channel over the status file; written from position 0
 * @throws IOException on any channel or mapping failure
 */
private void writeStatusToFile(FileChannel channel) throws IOException {
    long size = getBufferSize();

    ByteBuffer buffer;
    if (useNIOMemoryMapping) {
        // Map the whole record read-write and fault the pages in before writing.
        MappedByteBuffer mbb = channel.map(MapMode.READ_WRITE, 0, size);
        mbb.load();
        buffer = mbb;
    } else {
        // Non-mapped path: size the file first, assemble the record in a heap buffer.
        channel.truncate(size);
        buffer = ByteBuffer.wrap(new byte[(int) size]);
    }

    buffer.position(0);

    buffer.putLong(version);
    CRC32 crc32 = new CRC32();
    // NOTE(review): CRC32.update(int) consumes only the low 8 bits of its argument,
    // so each of these calls feeds a single byte (the '& 0xFFFFFFFF' mask on an int
    // is a no-op). The reader presumably computes the CRC identically — do not "fix"
    // this without changing both sides in lockstep.
    crc32.update((int) (version >>> 32) & 0xFFFFFFFF);
    crc32.update((int) (version >>> 0) & 0xFFFFFFFF);

    buffer.putInt(indexEntries.size());
    crc32.update(indexEntries.size());

    for (IndexEntry entry : indexEntries.values()) {
        // Fields are serialized and folded into the CRC in a fixed order that the
        // reader must consume identically.
        String entryType = entry.getType().toString();
        writeString(buffer, crc32, entryType);

        writeString(buffer, crc32, entry.getName());

        writeString(buffer, crc32, entry.getParentName());

        String entryStatus = entry.getStatus().toString();
        writeString(buffer, crc32, entryStatus);

        writeString(buffer, crc32, entry.getMergeId());

        buffer.putLong(entry.getDocumentCount());
        crc32.update((int) (entry.getDocumentCount() >>> 32) & 0xFFFFFFFF);
        crc32.update((int) (entry.getDocumentCount() >>> 0) & 0xFFFFFFFF);

        buffer.putLong(entry.getDeletions());
        crc32.update((int) (entry.getDeletions() >>> 32) & 0xFFFFFFFF);
        crc32.update((int) (entry.getDeletions() >>> 0) & 0xFFFFFFFF);

        // One-byte boolean flag; the CRC is fed the same byte via update(byte[]).
        buffer.put(entry.isDeletOnlyNodes() ? (byte) 1 : (byte) 0);
        crc32.update(entry.isDeletOnlyNodes() ? new byte[] { (byte) 1 } : new byte[] { (byte) 0 });
    }
    buffer.putLong(crc32.getValue());

    if (useNIOMemoryMapping) {
        // Flush the mapped pages to the backing file.
        ((MappedByteBuffer) buffer).force();
    } else {
        // Rewind the assembled record and write it at the start of the file.
        buffer.rewind();
        channel.position(0);
        channel.write(buffer);
    }
}

From source file:edu.umass.cs.gigapaxos.SQLPaxosLogger.java

/**
 * Journals a batch of log messages to the current append-only log file, creating
 * a PendingLogTask per journaled packet so the DB/pausable index can later locate
 * each message by (logfile, offset, length).
 *
 * Returns an array parallel to {@code packets} (entries may be null for skipped
 * packets), an empty array when journaling is disabled, or null on error (no
 * output stream, or an IOException during append).
 *
 * NOTE(review): the many *_ONLY / *_DONT_LOG_* flags appear to be test/benchmark
 * toggles selecting which packets are journaled — confirm against their
 * declarations, which are outside this chunk.
 */
private PendingLogTask[] journal(LogMessagingTask[] packets) {
    if (!ENABLE_JOURNALING)
        return new PendingLogTask[0]; // no error
    if (this.journaler.fos == null)
        return null; // error
    boolean amCoordinator = false, isAccept = false;
    PendingLogTask[] pending = new PendingLogTask[packets.length];
    for (int i = 0; i < packets.length; i++) {
        LogMessagingTask pkt = packets[i];
        // A node is "coordinator" for this packet if the packet's ballot names it;
        // only PValuePacket and PreparePacket carry a ballot here.
        amCoordinator = pkt.logMsg instanceof PValuePacket
                ? ((PValuePacket) pkt.logMsg).ballot.coordinatorID == myID
                : pkt.logMsg instanceof PreparePacket
                        ? ((PreparePacket) pkt.logMsg).ballot.coordinatorID == myID
                        : false;
        isAccept = pkt.logMsg.getType() == PaxosPacketType.ACCEPT;
        // Flag-driven filters: skip packets that the current configuration says
        // should not be journaled.
        if (DONT_LOG_DECISIONS && !isAccept)
            continue;
        if (NON_COORD_ONLY && amCoordinator && !COORD_STRINGIFIES_WO_JOURNALING)
            continue;
        if (COORD_ONLY && !amCoordinator)
            continue;
        if (NON_COORD_DONT_LOG_DECISIONS && !amCoordinator && !isAccept)
            continue;
        if (COORD_DONT_LOG_DECISIONS && amCoordinator && !isAccept)
            continue;

        try {
            {
                // Serialize (or fake-serialize from testBytes when configured to
                // measure journaling cost without stringification).
                byte[] bytes = !NO_STRINGIFY_JOURNALING && !(COORD_JOURNALS_WO_STRINGIFYING && amCoordinator)
                        ? toBytes(pkt.logMsg)
                        : Arrays.copyOf(testBytes, ((RequestPacket) pkt.logMsg).lengthEstimate());
                if (JOURNAL_COMPRESSION)
                    bytes = deflate(bytes);

                // format: <size><message>*
                ByteBuffer bbuf = ByteBuffer.allocate(4 + bytes.length);
                bbuf.putInt(bytes.length);
                bbuf.put(bytes);

                if (ALL_BUT_APPEND)
                    continue;

                if (STRINGIFY_WO_JOURNALING || (COORD_STRINGIFIES_WO_JOURNALING && amCoordinator))
                    continue;

                // else append to log file *after* creating pending task
                if (DB_INDEX_JOURNAL)
                    synchronized (this) {
                        // Record (logfile, current offset, length) before the append
                        // moves curLogfileSize forward.
                        SQLPaxosLogger.this.pendingLogMessages.add(pending[i] = new PendingLogTask(packets[i],
                                this.journaler.curLogfile, this.journaler.curLogfileSize, bytes.length));
                    }
                else if (PAUSABLE_INDEX_JOURNAL)
                    this.messageLog.add(packets[i].logMsg, this.journaler.curLogfile,
                            this.journaler.curLogfileSize, bytes.length);
                if (USE_MAP_DB && Util.oneIn(1000))
                    this.mapDB.dbMemory.commit();
                SQLPaxosLogger.this.journaler.appendToLogFile(bbuf.array(), pkt.logMsg.getPaxosID());
                // Sanity: the append must have advanced the file size by exactly the
                // framed record length from the recorded offset.
                assert (pending[i] == null
                        || this.journaler.curLogfileSize == pending[i].logfileOffset + bbuf.capacity());
            }

        } catch (IOException ioe) {
            ioe.printStackTrace();
            return null;
        }
    }

    if (this.journaler.curLogfileSize > MAX_LOG_FILE_SIZE) {
        // always commit pending before rolling log file
        log.log(Level.FINE, "{0} rolling log file {1}",
                new Object[] { SQLPaxosLogger.this.journaler, SQLPaxosLogger.this.journaler.curLogfile });
        // DelayProfiler.updateMovAvg("#fgsync",
        // this.pendingLogMessages.size());
        // first sync, then roll log file
        SQLPaxosLogger.this.syncLogMessagesIndex();
        long t = System.currentTimeMillis();
        SQLPaxosLogger.this.journaler.rollLogFile();
        DelayProfiler.updateDelay("rolllog", t, 1.0);

        if (this.journaler.shouldGC()) {
            // Garbage-collect obsolete log files on a background timer task at
            // minimum priority so it does not compete with foreground logging.
            this.GC.submit(new TimerTask() {
                @Override
                public void run() {
                    try {
                        Thread.currentThread().setPriority(Thread.MIN_PRIORITY);
                        SQLPaxosLogger.this
                                .garbageCollectJournal(SQLPaxosLogger.this.journaler.getGCCandidates());
                    } catch (Exception | Error e) {
                        log.severe(this + " incurred exception " + (e.getMessage() != null ? e.getMessage() : e)
                                + " while garbage collecting logfiles");
                        e.printStackTrace();
                    }
                }
            }, 0);
        }
    }
    if (!DB_INDEX_JOURNAL && Util.oneIn(Integer.MAX_VALUE))
        // used only for testing
        SQLPaxosLogger.deleteOldCheckpoints(logDirectory, SQLPaxosLogger.this.journaler.logfilePrefix, 5, this);

    return pending;
}