Example usage for java.nio ByteBuffer putLong

Introduction

This page lists example usages of java.nio.ByteBuffer.putLong collected from open-source projects.

Prototype

public abstract ByteBuffer putLong(long value);

Document

Writes eight bytes containing the given long value, in the current byte order, into this buffer at the current position, and then increments the position by eight.
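
For orientation, a minimal sketch of the call in isolation: putLong writes eight bytes in the buffer's current byte order (big-endian by default) and throws BufferOverflowException if fewer than eight bytes remain.

import java.nio.ByteBuffer;

public class PutLongDemo {
    public static void main(String[] args) {
        ByteBuffer buf = ByteBuffer.allocate(8);  // room for exactly one long
        buf.putLong(0x1122334455667788L);         // position advances from 0 to 8
        buf.flip();                               // switch the buffer to reading
        System.out.println(Long.toHexString(buf.getLong())); // prints 1122334455667788
    }
}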

Usage

From source file: org.springframework.integration.x.ip.websocket.WebSocketSerializer.java

@Override
public void serialize(final Object frame, OutputStream outputStream) throws IOException {
    String data = "";
    WebSocketFrame theFrame = null;
    if (frame instanceof String) {
        data = (String) frame;
        theFrame = new WebSocketFrame(WebSocketFrame.TYPE_DATA, data);
    } else if (frame instanceof WebSocketFrame) {
        theFrame = (WebSocketFrame) frame;
        data = theFrame.getPayload();
    }
    if (data != null && data.startsWith("HTTP/1.1")) {
        outputStream.write(data.getBytes());
        return;
    }
    int lenBytes;
    int payloadLen = this.server ? 0 : 0x80; // client-to-server frames must set the MASK bit
    boolean close = theFrame.getType() == WebSocketFrame.TYPE_CLOSE;
    boolean ping = theFrame.getType() == WebSocketFrame.TYPE_PING;
    boolean pong = theFrame.getType() == WebSocketFrame.TYPE_PONG;
    byte[] bytes = theFrame.getBinary() != null ? theFrame.getBinary() : data.getBytes("UTF-8");

    int length = bytes.length;
    if (close) {
        length += 2;
    }
    if (length >= Math.pow(2, 16)) {
        lenBytes = 8;
        payloadLen |= 127;
    } else if (length > 125) {
        lenBytes = 2;
        payloadLen |= 126;
    } else {
        lenBytes = 0;
        payloadLen |= length;
    }
    int mask = (int) System.currentTimeMillis();
    ByteBuffer buffer = ByteBuffer.allocate(length + 6 + lenBytes);
    if (ping) {
        buffer.put((byte) 0x89);
    } else if (pong) {
        buffer.put((byte) 0x8a);
    } else if (close) {
        buffer.put((byte) 0x88);
    } else if (theFrame.getType() == WebSocketFrame.TYPE_DATA_BINARY) {
        buffer.put((byte) 0x82);
    } else {
        // Final fragment; text
        buffer.put((byte) 0x81);
    }
    buffer.put((byte) payloadLen);
    if (lenBytes == 2) {
        buffer.putShort((short) length);
    } else if (lenBytes == 8) {
        buffer.putLong(length);
    }

    byte[] maskBytes = new byte[4];
    if (!server) {
        buffer.putInt(mask);
        buffer.position(buffer.position() - 4);
        buffer.get(maskBytes);
    }
    if (close) {
        buffer.putShort(theFrame.getStatus());
        // TODO: mask status when client
    }
    for (int i = 0; i < bytes.length; i++) {
        if (server) {
            buffer.put(bytes[i]);
        } else {
            buffer.put((byte) (bytes[i] ^ maskBytes[i % 4]));
        }
    }
    outputStream.write(buffer.array(), 0, buffer.position());
}
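
In this frame header, putLong carries RFC 6455's 64-bit extended payload length: lengths up to 125 fit directly in the header byte, 126 selects a 16-bit length field, and 127 selects a 64-bit field. A standalone sketch of just that rule, as a hypothetical helper (not part of the Spring class):

// Hypothetical helper illustrating the RFC 6455 payload-length encoding used above.
static void writePayloadLength(ByteBuffer buffer, long length, int maskBit) {
    if (length <= 125) {
        buffer.put((byte) (maskBit | (int) length)); // fits in the header byte
    } else if (length <= 0xFFFF) {
        buffer.put((byte) (maskBit | 126));
        buffer.putShort((short) length);             // 16-bit extended length
    } else {
        buffer.put((byte) (maskBit | 127));
        buffer.putLong(length);                      // 64-bit extended length
    }
}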

From source file: org.apache.carbondata.core.indexstore.blockletindex.BlockletDataMap.java

/**
 * Fill the measure min values with each type's minimum value; this is needed for backward
 * version compatibility, as older versions don't store min values for measures.
 */
private byte[][] updateMinValues(byte[][] minValues, int[] minMaxLen) {
    byte[][] updatedValues = minValues;
    if (minValues.length < minMaxLen.length) {
        updatedValues = new byte[minMaxLen.length][];
        System.arraycopy(minValues, 0, updatedValues, 0, minValues.length);
        List<CarbonMeasure> measures = segmentProperties.getMeasures();
        ByteBuffer buffer = ByteBuffer.allocate(8);
        for (int i = 0; i < measures.size(); i++) {
            buffer.rewind();
            DataType dataType = measures.get(i).getDataType();
            if (dataType == DataTypes.BYTE) {
                buffer.putLong(Byte.MIN_VALUE);
                updatedValues[minValues.length + i] = buffer.array().clone();
            } else if (dataType == DataTypes.SHORT) {
                buffer.putLong(Short.MIN_VALUE);
                updatedValues[minValues.length + i] = buffer.array().clone();
            } else if (dataType == DataTypes.INT) {
                buffer.putLong(Integer.MIN_VALUE);
                updatedValues[minValues.length + i] = buffer.array().clone();
            } else if (dataType == DataTypes.LONG) {
                buffer.putLong(Long.MIN_VALUE);
                updatedValues[minValues.length + i] = buffer.array().clone();
            } else if (DataTypes.isDecimal(dataType)) {
                updatedValues[minValues.length + i] = DataTypeUtil
                        .bigDecimalToByte(BigDecimal.valueOf(Long.MIN_VALUE));
            } else {
                buffer.putDouble(Double.MIN_VALUE);
                updatedValues[minValues.length + i] = buffer.array().clone();
            }
        }
    }
    return updatedValues;
}
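
Every fixed-width numeric type is widened to eight bytes before being stored, so each entry can be decoded with a single getLong (or getDouble for the fallback branch). Note that Double.MIN_VALUE is the smallest positive double, not the most negative one, so the fallback branch does not actually yield a true minimum; -Double.MAX_VALUE would. A decoding sketch with a hypothetical index i:

// Decoding sketch (hypothetical index i into the arrays produced above).
long minAsLong = ByteBuffer.wrap(updatedValues[i]).getLong();       // e.g. Byte.MIN_VALUE, widened
double minAsDouble = ByteBuffer.wrap(updatedValues[i]).getDouble(); // for the double branch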

From source file: org.apache.carbondata.core.indexstore.blockletindex.BlockletDataMap.java

/**
 * Fill the measure max values with each type's maximum value; this is needed for backward
 * version compatibility, as older versions don't store max values for measures.
 */
private byte[][] updateMaxValues(byte[][] maxValues, int[] minMaxLen) {
    byte[][] updatedValues = maxValues;
    if (maxValues.length < minMaxLen.length) {
        updatedValues = new byte[minMaxLen.length][];
        System.arraycopy(maxValues, 0, updatedValues, 0, maxValues.length);
        List<CarbonMeasure> measures = segmentProperties.getMeasures();
        ByteBuffer buffer = ByteBuffer.allocate(8);
        for (int i = 0; i < measures.size(); i++) {
            buffer.rewind();
            DataType dataType = measures.get(i).getDataType();
            if (dataType == DataTypes.BYTE) {
                buffer.putLong(Byte.MAX_VALUE);
                updatedValues[maxValues.length + i] = buffer.array().clone();
            } else if (dataType == DataTypes.SHORT) {
                buffer.putLong(Short.MAX_VALUE);
                updatedValues[maxValues.length + i] = buffer.array().clone();
            } else if (dataType == DataTypes.INT) {
                buffer.putLong(Integer.MAX_VALUE);
                updatedValues[maxValues.length + i] = buffer.array().clone();
            } else if (dataType == DataTypes.LONG) {
                buffer.putLong(Long.MAX_VALUE);
                updatedValues[maxValues.length + i] = buffer.array().clone();
            } else if (DataTypes.isDecimal(dataType)) {
                updatedValues[maxValues.length + i] = DataTypeUtil
                        .bigDecimalToByte(BigDecimal.valueOf(Long.MAX_VALUE));
            } else {
                buffer.putDouble(Double.MAX_VALUE);
                updatedValues[maxValues.length + i] = buffer.array().clone();
            }
        }
    }
    return updatedValues;
}

From source file: org.voltdb.utils.CatalogUtil.java

private static ByteBuffer makeCatalogVersionAndBytes(int catalogVersion, long txnId, long uniqueId,
        byte[] catalogBytes, byte[] deploymentBytes) {
    ByteBuffer versionAndBytes = ByteBuffer.allocate(4 + // catalog bytes length
            catalogBytes.length + 4 + // deployment bytes length
            deploymentBytes.length + 4 + // catalog version
            8 + // txnID
            8 + // unique ID
            20 + // catalog SHA-1 hash
            20 // deployment SHA-1 hash
    );
    versionAndBytes.putInt(catalogVersion);
    versionAndBytes.putLong(txnId);
    versionAndBytes.putLong(uniqueId);
    try {
        versionAndBytes.put((new InMemoryJarfile(catalogBytes)).getSha1Hash());
    } catch (IOException ioe) {
        VoltDB.crashLocalVoltDB("Unable to build InMemoryJarfile from bytes, should never happen.", true, ioe);
    }
    versionAndBytes.put(makeDeploymentHash(deploymentBytes));
    versionAndBytes.putInt(catalogBytes.length);
    versionAndBytes.put(catalogBytes);
    versionAndBytes.putInt(deploymentBytes.length);
    versionAndBytes.put(deploymentBytes);
    return versionAndBytes;
}
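
ByteBuffer data carries no self-describing framing, so a reader must consume the fields in exactly the order they were written. A minimal decoding sketch (hypothetical; not VoltDB's actual reader):

// Hypothetical reader mirroring the field order written above.
versionAndBytes.flip();
int catalogVersion = versionAndBytes.getInt();
long txnId = versionAndBytes.getLong();
long uniqueId = versionAndBytes.getLong();
byte[] catalogHash = new byte[20];      // SHA-1 of the catalog jar
versionAndBytes.get(catalogHash);
byte[] deploymentHash = new byte[20];   // SHA-1 of the deployment bytes
versionAndBytes.get(deploymentHash);
byte[] catalogBytes = new byte[versionAndBytes.getInt()];
versionAndBytes.get(catalogBytes);
byte[] deploymentBytes = new byte[versionAndBytes.getInt()];
versionAndBytes.get(deploymentBytes);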

From source file: net.spfbl.core.Core.java

private static long getCodeOTP(byte[] secret, long timeIndex) {
    try {
        SecretKeySpec signKey = new SecretKeySpec(secret, "HmacSHA1");
        ByteBuffer buffer = ByteBuffer.allocate(8);
        buffer.putLong(timeIndex);
        byte[] timeBytes = buffer.array();
        Mac mac = Mac.getInstance("HmacSHA1");
        mac.init(signKey);
        byte[] hash = mac.doFinal(timeBytes);
        int offset = hash[19] & 0xf; // dynamic truncation offset (RFC 4226)
        long truncatedHash = hash[offset] & 0x7f;
        for (int i = 1; i < 4; i++) {
            truncatedHash <<= 8;
            truncatedHash |= hash[offset + i] & 0xff;
        }
        return truncatedHash % 1000000; // six-digit code
    } catch (Exception ex) {
        return 0;
    }
}
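
Callers typically derive timeIndex from the wall clock using TOTP's conventional 30-second step (RFC 6238); putLong turns that counter into the 8-byte big-endian message the HMAC expects. A usage sketch, assuming access to getCodeOTP and the shared secret:

// Usage sketch: RFC 6238's default 30-second time step.
long timeIndex = System.currentTimeMillis() / 1000L / 30L;
long code = getCodeOTP(secret, timeIndex);
System.out.println(String.format("%06d", code)); // zero-padded six-digit code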

From source file: io.pcp.parfait.dxm.PcpMmvWriter.java

private void populateDataBuffer(ByteBuffer dataFileBuffer, Collection<PcpValueInfo> valueInfos)
        throws IOException {

    // Automatically cleanup the file if this is a mapping where we
    // mandate PID checking from the MMV PMDA (MMV_FLAG_PROCESS) and
    // we were able to stash a path name earlier
    if (file != null && flags.contains(MmvFlag.MMV_FLAG_PROCESS)) {
        file.deleteOnExit();
    }

    dataFileBuffer.position(0);
    dataFileBuffer.put(TAG);
    dataFileBuffer.putInt(mmvVersion.getVersion());
    long generation = System.currentTimeMillis() / 1000;
    dataFileBuffer.putLong(generation);
    int gen2Offset = dataFileBuffer.position();
    // Generation 2 will be filled in later, once the file's ready
    dataFileBuffer.putLong(0);
    // 2 TOC blocks, 3 if there are instances
    dataFileBuffer.putInt(tocCount());
    dataFileBuffer.putInt(getFlagMask());
    dataFileBuffer.putInt(getProcessIdentifier());
    dataFileBuffer.putInt(clusterIdentifier);

    Collection<? extends MmvWritable> instanceDomains = getInstanceDomains();
    Collection<? extends MmvWritable> instances = getInstances();
    Collection<? extends MmvWritable> metrics = getMetricInfos();
    Collection<? extends MmvWritable> strings = getStrings();

    int tocBlockIndex = 0;

    if (!instanceDomains.isEmpty()) {
        dataFileBuffer.position(getTocOffset(tocBlockIndex++));
        writeToc(dataFileBuffer, TocType.INSTANCE_DOMAINS, instanceDomains.size(),
                instanceDomains.iterator().next().getOffset());
    }

    if (!instances.isEmpty()) {
        dataFileBuffer.position(getTocOffset(tocBlockIndex++));
        writeToc(dataFileBuffer, TocType.INSTANCES, instances.size(), instances.iterator().next().getOffset());
    }

    dataFileBuffer.position(getTocOffset(tocBlockIndex++));

    int metricsFirstEntryOffset = metrics.isEmpty() ? 0 : metrics.iterator().next().getOffset();
    int valuesFirstEntryOffset = valueInfos.isEmpty() ? 0 : valueInfos.iterator().next().getOffset();

    writeToc(dataFileBuffer, TocType.METRICS, metrics.size(), metricsFirstEntryOffset);
    dataFileBuffer.position(getTocOffset(tocBlockIndex++));
    writeToc(dataFileBuffer, TocType.VALUES, valueInfos.size(), valuesFirstEntryOffset);

    if (!getStrings().isEmpty()) {
        dataFileBuffer.position(getTocOffset(tocBlockIndex++));
        writeToc(dataFileBuffer, TocType.STRINGS, strings.size(), strings.iterator().next().getOffset());
    }

    for (MmvWritable instanceDomain : instanceDomains) {
        instanceDomain.writeToMmv(dataFileBuffer);
    }

    for (MmvWritable info : metrics) {
        info.writeToMmv(dataFileBuffer);
    }

    for (MmvWritable info : valueInfos) {
        info.writeToMmv(dataFileBuffer);
    }

    for (MmvWritable string : strings) {
        string.writeToMmv(dataFileBuffer);
    }

    // Once it's set up, let the agent know
    dataFileBuffer.position(gen2Offset);
    dataFileBuffer.putLong(generation);
}
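
The generation number is deliberately written twice: a zero placeholder up front, then the real value once the file is fully populated, so an agent reading a half-written mapping can detect it. The same patch-up can also be expressed with the absolute overload putLong(int index, long value), which leaves the position untouched; a minimal sketch:

// Sketch of the placeholder-then-patch idiom using the absolute putLong overload.
int gen2Offset = dataFileBuffer.position();
dataFileBuffer.putLong(0L);                     // placeholder; position advances by 8
// ... populate the rest of the file ...
dataFileBuffer.putLong(gen2Offset, generation); // absolute write; position unchanged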

From source file: srebrinb.compress.sevenzip.SevenZOutputFile.java

/**
 * Finishes the addition of entries to this archive, without closing it.
 *
 * @throws IOException if archive is already closed.
 */
public void finish() throws IOException {
    if (finished) {
        throw new IOException("This archive has already been finished");
    }
    finished = true;

    final long headerPosition = channel.position();

    final ByteArrayOutputStream headerBaos = new ByteArrayOutputStream();
    final DataOutputStream header = new DataOutputStream(headerBaos);

    writeHeader(header);
    header.flush();
    final byte[] headerBytes = headerBaos.toByteArray();
    channel.write(ByteBuffer.wrap(headerBytes));

    final CRC32 crc32 = new CRC32();
    crc32.update(headerBytes);

    ByteBuffer bb = ByteBuffer.allocate(SevenZFile.sevenZSignature.length + 2 /* version */
            + 4 /* start header CRC */
            + 8 /* next header position */
            + 8 /* next header length */
            + 4 /* next header CRC */).order(ByteOrder.LITTLE_ENDIAN);
    // signature header
    channel.position(0);
    bb.put(SevenZFile.sevenZSignature);
    // version
    bb.put((byte) 0).put((byte) 2);

    // placeholder for start header CRC
    bb.putInt(0);

    // start header
    bb.putLong(headerPosition - SevenZFile.SIGNATURE_HEADER_SIZE).putLong(0xffffFFFFL & headerBytes.length)
            .putInt((int) crc32.getValue());
    crc32.reset();
    crc32.update(bb.array(), SevenZFile.sevenZSignature.length + 6, 20);
    bb.putInt(SevenZFile.sevenZSignature.length + 2, (int) crc32.getValue());
    bb.flip();
    channel.write(bb);
}
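
Note that order(ByteOrder.LITTLE_ENDIAN) is set once on the buffer and every subsequent putLong, putInt, and putShort honors it; the 7z format is little-endian throughout, while ByteBuffer defaults to big-endian. A minimal sketch of the difference:

// Sketch: the same value under the two byte orders.
ByteBuffer be = ByteBuffer.allocate(8);          // big-endian by default
be.putLong(1L);                                  // bytes: 00 00 00 00 00 00 00 01
ByteBuffer le = ByteBuffer.allocate(8).order(ByteOrder.LITTLE_ENDIAN);
le.putLong(1L);                                  // bytes: 01 00 00 00 00 00 00 00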

From source file: com.yobidrive.diskmap.buckets.BucketTableManager.java

private void commitBucketTableToDisk() throws BucketTableManagerException {
    File currentFile = null;
    FileChannel fileChannel = null;
    ByteBuffer headerBuffer = null;
    try {
        logger.warn("Start commit bucket table...");
        if (bucketTable.getRequestedCheckPoint() == null || bucketTable.getRequestedCheckPoint().isEmpty())
            throw new BucketTableManagerException("commit requested while there is no requested checkpoint");
        currentFile = getLatestCommitedFile();
        File nextFile = getNextFile(getLatestCommitedFile());
        fileChannel = (new RandomAccessFile(nextFile, "rw")).getChannel();
        // Write header with empty checkpoint 
        headerBuffer = ByteBuffer.allocate(HEADERSIZE);
        fileChannel.position(0L);
        headerBuffer.putInt(MAGICSTART);
        headerBuffer.putLong(mapSize);
        // NeedlePointer lastCheckPoint = bucketTable.getLastCheckPoint() ; // Reset checkpoint to no checkpoint done
        NeedlePointer lastCheckPoint = new NeedlePointer(); // Empty needle
        lastCheckPoint.putNeedlePointerToBuffer(headerBuffer);
        headerBuffer.putInt(MAGICEND);
        headerBuffer.flip(); // limit the buffer to the bytes just written
        fileChannel.write(headerBuffer);
        // Now writes buffers
        for (int i = 0; i < nbBuffers; i++) {
            bucketTable.prepareBufferForWriting(i);
            int written = fileChannel.write(bucketTable.getBuffer(i));
            if (written < bucketTable.getBuffer(i).limit())
                throw new BucketTableManagerException("Incomplete write for bucket table file "
                        + nextFile.getName() + ", expected " + (mapSize + HEADERSIZE));
            // else
            // logger.info("Bucket table commit: written "+(i+1)*entriesPerBuffer+" buckets"+((i<(nbBuffers-1))?"...":"")) ;
            try {
                Thread.sleep(10); // brief pause between buffer writes
            } catch (Throwable th) {
                // ignore; the pause is best-effort
            }
        }
        // Writes second magic number
        ByteBuffer buffer = ByteBuffer.allocate(NeedleLogInfo.INFOSIZE);
        buffer.rewind();
        buffer.limit(INTSIZE);
        buffer.putInt(MAGICSTART);
        buffer.rewind();
        fileChannel.write(buffer);
        // Write Needle Log Info
        Iterator<NeedleLogInfo> it = logInfoPerLogNumber.values().iterator();
        while (it.hasNext()) {
            buffer.rewind();
            buffer.limit(NeedleLogInfo.INFOSIZE);
            NeedleLogInfo nli = it.next();
            nli.putNeedleLogInfo(buffer, true);
            int written = fileChannel.write(buffer);
            if (written < NeedleLogInfo.INFOSIZE)
                throw new BucketTableManagerException(
                        "Incomplete write for bucket table file, writing log infos " + nextFile.getName());
        }
        // Writes checkpoint
        headerBuffer = ByteBuffer.allocate(NeedlePointer.POINTERSIZE);
        headerBuffer.rewind();
        headerBuffer.limit(NeedlePointer.POINTERSIZE);
        // System.out.println("Writing checkpoint in index "+bucketTable.getRequestedCheckPoint()) ;
        bucketTable.getRequestedCheckPoint().putNeedlePointerToBuffer(headerBuffer, true); // Flip buffer after write
        headerBuffer.rewind();
        // fileChannel.force(false) ;
        if (fileChannel.write(headerBuffer, CHECKPOINTOFFSET) < NeedlePointer.POINTERSIZE) {
            throw new BucketTableManagerException("Could not write checkpoint to " + nextFile.getName());
        }
        fileChannel.force(true);
        fileChannel.close();
        if (!nextFile.renameTo(getCommittedFile(nextFile)))
            throw new BucketTableManagerException(
                    "Could not rename " + nextFile.getName() + " to " + getCommittedFile(nextFile).getName());

        logger.warn("Committed bucket table.");
    } catch (IOException ie) {
        throw new BucketTableManagerException("Failed writting bucket table", ie);
    } finally {
        headerBuffer = null; // may ease garbage collection
        if (fileChannel != null) {
            try {
                fileChannel.close();
            } catch (Exception ex) {
                throw new BucketTableManagerException("Failed to close file channel", ex);
            }
        }
    }
    try {
        if (currentFile != null) {
            if (!currentFile.delete())
                logger.error("Failed deleting previous bucket table" + currentFile.getName());
        }
    } catch (Throwable th) {
        logger.error("Failed deleting previous bucket table" + currentFile.getName(), th);
    }
}
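
A reader validating a committed file would consume the same header fields in order; a hypothetical verification sketch, assuming a freshly opened FileChannel on the committed file and the constants used above:

// Hypothetical verification sketch; MAGICSTART and HEADERSIZE are the constants used above.
ByteBuffer header = ByteBuffer.allocate(HEADERSIZE);
fileChannel.read(header, 0L);                 // absolute read of the header block
header.flip();
if (header.getInt() != MAGICSTART) {
    throw new BucketTableManagerException("Bad magic number in bucket table header");
}
long storedMapSize = header.getLong();        // written by headerBuffer.putLong(mapSize) above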