Example usage for java.nio ByteOrder nativeOrder

List of usage examples for java.nio ByteOrder nativeOrder

Introduction

On this page you can find example usages of java.nio.ByteOrder.nativeOrder().

Prototype

public static ByteOrder nativeOrder() 

Document

Returns the current platform byte order.
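
To see what the method reports on your machine, here is a minimal standalone sketch (the class name is ours; it is not taken from any of the examples below) that prints the platform byte order and applies it to a freshly allocated buffer:

import java.nio.ByteBuffer;
import java.nio.ByteOrder;

public class NativeOrderDemo {
    public static void main(String[] args) {
        // Query the byte order of the underlying platform (e.g. LITTLE_ENDIAN on x86).
        ByteOrder order = ByteOrder.nativeOrder();
        System.out.println("Native byte order: " + order);

        // New ByteBuffers default to BIG_ENDIAN; switch to the native order before
        // exchanging raw bytes with native code or memory-mapped data.
        ByteBuffer buffer = ByteBuffer.allocate(8).order(order);
        buffer.putInt(0x01020304);
    }
}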

Usage

From source file:org.nuras.mcpha.Client.java

/**
 * Get histogram data.
 * 
 * @param chan the channel to read the histogram from
 * @return an IntBuffer view of the histogram data, in the platform's native byte order
 * @throws java.io.IOException 
 */
synchronized public static IntBuffer mcphaGetHistogramData(long chan) throws IOException {
    sendCommand(MCPHA_COMMAND_READ_HISTOGRAM_DATA, chan, 0);

    DataInputStream in = new DataInputStream(deviceSocket.getInputStream());

    ByteBuffer data = ByteBuffer.allocate(65536);
    data.order(ByteOrder.nativeOrder());
    in.readFully(data.array());

    return data.asIntBuffer();
}
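
A caller might copy the returned view into a plain array (a minimal sketch, ignoring IOException handling; the channel number 0 is only illustrative):

IntBuffer histogram = mcphaGetHistogramData(0);
int[] counts = new int[histogram.remaining()];
histogram.get(counts);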

From source file:org.nuras.mcpha.Client.java

/**
 * Get oscilloscope data as 16-bit signed integer values.
 * The channels are interleaved sample-by-sample (ch1, ch2, ch1, ch2, etc).
 *
 * @return a ShortBuffer of channel data values.
 * @throws java.io.IOException 
 */
synchronized public static ShortBuffer mcphaGetOsilloscopeData() throws IOException {
    sendCommand(MCPHA_COMMAND_READ_OSCILLOSCOPE_DATA, 0L, 0L);

    DataInputStream in = new DataInputStream(deviceSocket.getInputStream());

    ByteBuffer data = ByteBuffer.allocate(65536);
    data.order(ByteOrder.nativeOrder());
    in.readFully(data.array());

    return data.asShortBuffer();
}
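
Since the two channels come back interleaved sample-by-sample, a caller might split them into per-channel arrays like this (a minimal sketch; the helper name is ours, and it assumes the two-channel layout described in the javadoc above):

// Split the interleaved (ch1, ch2, ch1, ch2, ...) samples into two arrays.
static short[][] deinterleave(ShortBuffer interleaved) {
    int pairs = interleaved.remaining() / 2;
    short[] ch1 = new short[pairs];
    short[] ch2 = new short[pairs];
    for (int i = 0; i < pairs; i++) {
        ch1[i] = interleaved.get();
        ch2[i] = interleaved.get();
    }
    return new short[][] { ch1, ch2 };
}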

From source file:edu.harvard.iq.dvn.ingest.statdataio.impl.plugins.sav.SAVFileReader.java

void decodeRecordType1(BufferedInputStream stream) throws IOException {
    dbgLog.fine("***** decodeRecordType1(): start *****");

    if (stream == null) {
        throw new IllegalArgumentException("stream == null!");
    }
    // how to read each recordType
    // 1. set-up the following objects before reading bytes
    // a. the working byte array
    // b. the storage object
    // the length of this field: 172 bytes = 60 + 4 + 12 + 4 + 8 + 84
    // this field consists of 6 distinct blocks

    byte[] recordType1 = new byte[LENGTH_RECORDTYPE1];
    // int caseWeightVariableOBSIndex = 0; 

    try {
        int nbytes = stream.read(recordType1, 0, LENGTH_RECORDTYPE1);

        //printHexDump(recordType1, "recordType1");

        if (nbytes == 0) {
            throw new IOException("reading recordType1: no byte was read");
        }

        // 1.1 60 byte-String that tells the platform/version of SPSS that
        // wrote this file

        int offset_start = 0;
        int offset_end = LENGTH_SPSS_PRODUCT_INFO; // 60 bytes

        String productInfo = new String(Arrays.copyOfRange(recordType1, offset_start, offset_end), "US-ASCII");

        dbgLog.fine("productInfo:\n" + productInfo + "\n");

        // add the info to the fileInfo
        smd.getFileInformation().put("productInfo", productInfo);

        // try to parse out the SPSS version that created this data
        // file: 

        String spssVersionNumberTag = null;

        String regexpVersionNumber = ".*Release ([0-9]*)";
        Pattern patternJsession = Pattern.compile(regexpVersionNumber);
        Matcher matcher = patternJsession.matcher(productInfo);
        if (matcher.find()) {
            spssVersionNumberTag = matcher.group(1);
            dbgLog.fine("SPSS Version Number: " + spssVersionNumberTag);
        }

        if (spssVersionNumberTag != null && !spssVersionNumberTag.equals("")) {
            spssVersionNumber = Integer.valueOf(spssVersionNumberTag).intValue();

            /*
             *  Starting with SPSS version 16, the default encoding is 
             *  UTF-8. 
             *  But we are only going to use it if the user did not explicitly
             *  specify the encoding on the addfiles page. Then we'd want 
             *  to stick with whatever they entered. 
             */
            if (spssVersionNumber > 15) {
                if (getDataLanguageEncoding() == null) {
                    defaultCharSet = "UTF-8";
                }
            }
        }

        smd.getFileInformation().put("charset", defaultCharSet);

        // 1.2) 4-byte file-layout-code (byte-order)

        offset_start = offset_end;
        offset_end += LENGTH_FILE_LAYOUT_CODE; // 4 byte

        ByteBuffer bb_fileLayout_code = ByteBuffer.wrap(recordType1, offset_start, LENGTH_FILE_LAYOUT_CODE);

        ByteBuffer byteOderTest = bb_fileLayout_code.duplicate();
        // interpret the 4 bytes as an int

        int int2test = byteOderTest.getInt();

        if (int2test == 2 || int2test == 3) {
            dbgLog.fine("integer == " + int2test + ": the byte-oder of the writer is the same "
                    + "as the counterpart of Java: Big Endian");
        } else {
            // Because Java's default byte order is big endian,
            // a mismatch here means this sav file was written on a little-endian machine:
            // non-string, multi-byte blocks must be byte-reversed

            bb_fileLayout_code.order(ByteOrder.LITTLE_ENDIAN);

            int2test = bb_fileLayout_code.getInt();

            if (int2test == 2 || int2test == 3) {
                dbgLog.fine("The sav file was saved on a little endian machine");
                dbgLog.fine("Reveral of the bytes is necessary to decode " + "multi-byte, non-string blocks");

                isLittleEndian = true;

            } else {
                throw new IOException("reading recordType1:unknown file layout code=" + int2test);
            }
        }

        dbgLog.fine("Endian of this platform:" + ByteOrder.nativeOrder().toString());

        smd.getFileInformation().put("OSByteOrder", ByteOrder.nativeOrder().toString());
        smd.getFileInformation().put("byteOrder", int2test);

        // 1.3 4-byte Number_Of_OBS_Units_Per_Case 
        // (= how many RT2 records => how many variables)

        offset_start = offset_end;
        offset_end += LENGTH_NUMBER_OF_OBS_UNITS_PER_CASE; // 4 byte

        ByteBuffer bb_OBS_units_per_case = ByteBuffer.wrap(recordType1, offset_start,
                LENGTH_NUMBER_OF_OBS_UNITS_PER_CASE);

        if (isLittleEndian) {
            bb_OBS_units_per_case.order(ByteOrder.LITTLE_ENDIAN);
        }

        OBSUnitsPerCase = bb_OBS_units_per_case.getInt();

        dbgLog.fine("RT1: OBSUnitsPerCase=" + OBSUnitsPerCase);

        smd.getFileInformation().put("OBSUnitsPerCase", OBSUnitsPerCase);

        // 1.4 4-byte Compression_Switch

        offset_start = offset_end;
        offset_end += LENGTH_COMPRESSION_SWITCH; // 4 byte

        ByteBuffer bb_compression_switch = ByteBuffer.wrap(recordType1, offset_start,
                LENGTH_COMPRESSION_SWITCH);

        if (isLittleEndian) {
            bb_compression_switch.order(ByteOrder.LITTLE_ENDIAN);
        }

        int compression_switch = bb_compression_switch.getInt();
        if (compression_switch == 0) {
            // data section is not compressed
            isDataSectionCompressed = false;
            dbgLog.fine("data section is not compressed");
        } else {
            dbgLog.fine("data section is compressed:" + compression_switch);
        }

        smd.getFileInformation().put("compressedData", compression_switch);

        // 1.5 4-byte Case-Weight Variable Index
        // warning: this variable index starts from 1, not 0

        offset_start = offset_end;
        offset_end += LENGTH_CASE_WEIGHT_VARIABLE_INDEX; // 4 byte

        ByteBuffer bb_Case_Weight_Variable_Index = ByteBuffer.wrap(recordType1, offset_start,
                LENGTH_CASE_WEIGHT_VARIABLE_INDEX);

        if (isLittleEndian) {
            bb_Case_Weight_Variable_Index.order(ByteOrder.LITTLE_ENDIAN);
        }

        caseWeightVariableOBSIndex = bb_Case_Weight_Variable_Index.getInt();

        smd.getFileInformation().put("caseWeightVariableOBSIndex", caseWeightVariableOBSIndex);

        // 1.6 4-byte Number of Cases

        offset_start = offset_end;
        offset_end += LENGTH_NUMBER_OF_CASES; // 4 byte

        ByteBuffer bb_Number_Of_Cases = ByteBuffer.wrap(recordType1, offset_start, LENGTH_NUMBER_OF_CASES);

        if (isLittleEndian) {
            bb_Number_Of_Cases.order(ByteOrder.LITTLE_ENDIAN);
        }

        int numberOfCases = bb_Number_Of_Cases.getInt();

        if (numberOfCases < 0) {
            // -1 if numberOfCases is unknown
            throw new RuntimeException("number of cases is not recorded in the header");
        } else {
            dbgLog.fine("RT1: number of cases is recorded= " + numberOfCases);
            caseQnty = numberOfCases;
            smd.getFileInformation().put("caseQnty", numberOfCases);
        }

        // 1.7 8-byte compression-bias [not long but double]

        offset_start = offset_end;
        offset_end += LENGTH_COMPRESSION_BIAS; // 8 byte

        ByteBuffer bb_compression_bias = ByteBuffer
                .wrap(Arrays.copyOfRange(recordType1, offset_start, offset_end));

        if (isLittleEndian) {
            bb_compression_bias.order(ByteOrder.LITTLE_ENDIAN);
        }

        Double compressionBias = bb_compression_bias.getDouble();

        if (compressionBias == 100d) {
            // 100 is expected
            dbgLog.fine("compressionBias is 100 as expected");
            smd.getFileInformation().put("compressionBias", 100);
        } else {
            dbgLog.fine("compression bias is not 100: " + compressionBias);
            smd.getFileInformation().put("compressionBias", compressionBias);
        }

        // 1.8 84-byte File Creation Information (date/time: dd MM yy hh:mm:ss +
        // 64-byte label)

        offset_start = offset_end;
        offset_end += LENGTH_FILE_CREATION_INFO; // 84 bytes

        String fileCreationInfo = getNullStrippedString(
                new String(Arrays.copyOfRange(recordType1, offset_start, offset_end), "US-ASCII"));

        dbgLog.fine("fileCreationInfo:\n" + fileCreationInfo + "\n");

        String fileCreationDate = fileCreationInfo.substring(0, length_file_creation_date);
        int dateEnd = length_file_creation_date + length_file_creation_time;
        String fileCreationTime = fileCreationInfo.substring(length_file_creation_date, (dateEnd));
        String fileCreationNote = fileCreationInfo.substring(dateEnd, length_file_creation_label);

        dbgLog.fine("fileDate=" + fileCreationDate);
        dbgLog.fine("fileTime=" + fileCreationTime);
        dbgLog.fine("fileNote" + fileCreationNote);

        smd.getFileInformation().put("fileDate", fileCreationDate);
        smd.getFileInformation().put("fileTime", fileCreationTime);
        smd.getFileInformation().put("fileNote", fileCreationNote);
        smd.getFileInformation().put("varFormat_schema", "SPSS");

        // add the info to the fileInfo

        smd.getFileInformation().put("mimeType", MIME_TYPE[0]);
        smd.getFileInformation().put("fileFormat", MIME_TYPE[0]);

        smd.setValueLabelMappingTable(valueVariableMappingTable);

    } catch (IOException ex) {
        //ex.printStackTrace();
        throw ex;
    }

    dbgLog.fine("***** decodeRecordType1(): end *****");
}
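
The byte-order detection above boils down to a small pattern: read the 4-byte file layout code with Java's default big-endian order and, if it does not decode to one of the expected values (2 or 3), re-read the same bytes as little-endian. A condensed, self-contained sketch of that pattern (the class and method names are ours, for illustration only):

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;

public class LayoutCodeSniffer {

    /** Returns the byte order a .sav file was written in, based on its 4-byte layout code. */
    static ByteOrder detectOrder(byte[] layoutCode) throws IOException {
        ByteBuffer bb = ByteBuffer.wrap(layoutCode); // exactly the 4 layout-code bytes
        int code = bb.getInt(0);                     // Java's default order: BIG_ENDIAN
        if (code == 2 || code == 3) {
            return ByteOrder.BIG_ENDIAN;
        }
        bb.order(ByteOrder.LITTLE_ENDIAN);           // re-interpret the same bytes
        code = bb.getInt(0);
        if (code == 2 || code == 3) {
            return ByteOrder.LITTLE_ENDIAN;
        }
        throw new IOException("unknown file layout code=" + code);
    }
}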

From source file:it.unimi.di.big.mg4j.tool.Combine.java

/** Combines several indices into one.
 *
 * @param ioFactory the factory that will be used to perform I/O.
 * @param outputBasename the basename of the combined index.
 * @param inputBasename the basenames of the input indices.
 * @param delete a monotonically increasing list of integers representing documents that will be deleted from the output index, or <code>null</code>.
 * @param metadataOnly if true, we save only metadata (term list, frequencies, occurrencies).
 * @param requireSizes if true, the sizes of input indices will be forced to be loaded.
 * @param bufferSize the buffer size for index readers.
 * @param writerFlags the flags for the index writer.
 * @param indexType the type of the index to build.
 * @param skips whether to insert skips in case <code>interleaved</code> is true.
 * @param quantum the quantum of skipping structures; if negative, a percentage of space for variable-quantum indices (irrelevant if <code>skips</code> is false).
 * @param height the height of skipping towers (irrelevant if <code>skips</code> is false).
 * @param skipBufferOrCacheSize the size of the buffer used to hold temporarily inverted lists during the skipping structure construction, or the size of the bit cache used when
 * building a {@linkplain QuasiSuccinctIndex quasi-succinct index}.
 * @param logInterval how often we log.
 */
public Combine(final IOFactory ioFactory, final String outputBasename, final String[] inputBasename,
        final IntList delete, final boolean metadataOnly, final boolean requireSizes, final int bufferSize,
        final Map<Component, Coding> writerFlags, IndexType indexType, boolean skips, final int quantum,
        final int height, final int skipBufferOrCacheSize, final long logInterval) throws IOException,
        ConfigurationException, URISyntaxException, ClassNotFoundException, SecurityException,
        InstantiationException, IllegalAccessException, InvocationTargetException, NoSuchMethodException {

    this.logInterval = logInterval;
    this.ioFactory = ioFactory;

    LOGGER.debug("Combining indices " + Arrays.toString(inputBasename) + " into " + outputBasename);

    // We filter query parameters. A bit dirty--must be kept in sync with Index.getInstance().
    this.inputBasename = new String[inputBasename.length];
    for (int i = 0; i < inputBasename.length; i++) {
        final int questionMarkPos = inputBasename[i].indexOf('?');
        this.inputBasename[i] = questionMarkPos == -1 ? inputBasename[i]
                : inputBasename[i].substring(0, questionMarkPos);
    }
    this.outputBasename = outputBasename;
    this.metadataOnly = metadataOnly;
    this.bufferSize = bufferSize;
    needsSizes = writerFlags.get(Component.POSITIONS) == Coding.GOLOMB
            || writerFlags.get(Component.POSITIONS) == Coding.INTERPOLATIVE;

    numIndices = inputBasename.length;
    index = new Index[numIndices];
    indexReader = new IndexReader[numIndices];
    indexIterator = new IndexIterator[numIndices];
    occurrencies = new InputBitStream[numIndices];
    offsets = new InputBitStream[numIndices];
    posNumBits = new InputBitStream[numIndices];
    sumsMaxPos = new InputBitStream[numIndices];
    term = new MutableString[numIndices];
    termReader = new FastBufferedReader[numIndices];
    termQueue = new ObjectHeapSemiIndirectPriorityQueue<MutableString>(term, numIndices);

    // This will remain set if *all* indices to be merged agree. haveSumsMaxPos starts from true only for quasi-succinct indices.
    boolean haveCounts = writerFlags.containsKey(Component.COUNTS),
            havePositions = writerFlags.containsKey(Component.POSITIONS);
    haveSumsMaxPos = haveOccurrencies = true;
    writeSizes = true;
    /* This will be set if *all* indices to be merged agree. Moreover, if some
     * indices disagree we will emit a warning. */
    TermProcessor termProcessor = null;
    /* This will be set if *all* indices to be merged agree. Moreover, if some
     * indices disagree we will emit a warning. */
    Payload payload = null;
    String field = null;
    boolean someOccurrencies = false, someSizes = false, allDataForSizeComputation = true;

    for (int i = 0; i < numIndices; i++) {
        index[i] = Index.getInstance(ioFactory, inputBasename[i], false, requireSizes, false);
        if (i == 0) {
            termProcessor = index[0].termProcessor.copy();
            payload = index[0].payload == null ? null : index[0].payload.copy();
        } else {
            if (!termProcessor.equals(index[i].termProcessor))
                throw new IllegalStateException("The term processor of the first index (" + termProcessor
                        + ") is different from the term processor of index " + i + " (" + index[i].termProcessor
                        + ")");
            if ((payload == null) != (index[i].payload == null)
                    || payload != null && !payload.compatibleWith(index[i].payload))
                throw new IllegalStateException("The payload specification of index " + index[0]
                        + " is not compatible with that of index " + index[i]);
        }

        if (index[i].field != null) {
            if (field == null) {
                if (i != 0)
                    LOGGER.warn("Not all indices specify the field property");
                field = index[i].field;
            } else if (!field.equals(index[i].field))
                LOGGER.warn("Index fields disagree: \"" + field + "\", \"" + index[i].field + "\"");
        }

        haveCounts &= index[i].hasCounts;
        havePositions &= index[i].hasPositions;
        maxCount = Math.max(maxCount, index[i].maxCount);
        indexReader[i] = index[i].getReader(bufferSize);
        if (index[i].properties.getLong(Index.PropertyKeys.OCCURRENCES, -1) == -1)
            numberOfOccurrences = -1;
        if (numberOfOccurrences != -1)
            numberOfOccurrences += index[i].properties.getLong(Index.PropertyKeys.OCCURRENCES);

        final String occurrenciesFile = this.inputBasename[i] + DiskBasedIndex.OCCURRENCIES_EXTENSION;
        haveOccurrencies &= ioFactory.exists(occurrenciesFile);
        someOccurrencies |= ioFactory.exists(occurrenciesFile);
        if (haveOccurrencies)
            occurrencies[i] = new InputBitStream(ioFactory.getInputStream(occurrenciesFile), false);

        final String sumsMaxPosFile = this.inputBasename[i] + DiskBasedIndex.SUMS_MAX_POSITION_EXTENSION;
        haveSumsMaxPos &= ioFactory.exists(sumsMaxPosFile);
        if (haveSumsMaxPos)
            sumsMaxPos[i] = new InputBitStream(ioFactory.getInputStream(sumsMaxPosFile), false);

        if (!metadataOnly) {
            final String offsetsFile = this.inputBasename[i] + DiskBasedIndex.OFFSETS_EXTENSION;
            allDataForSizeComputation &= ioFactory.exists(offsetsFile);
            if (quantum < 0 && allDataForSizeComputation)
                offsets[i] = new InputBitStream(ioFactory.getInputStream(offsetsFile), false);

            if (index[i].hasPositions && indexType != IndexType.QUASI_SUCCINCT) {
                final String positionsLengthsFile = this.inputBasename[i]
                        + DiskBasedIndex.POSITIONS_NUMBER_OF_BITS_EXTENSION;
                allDataForSizeComputation &= ioFactory.exists(positionsLengthsFile);
                if (quantum < 0 && allDataForSizeComputation)
                    posNumBits[i] = new InputBitStream(ioFactory.getInputStream(positionsLengthsFile), false);
            }
        }

        final String sizesFile = this.inputBasename[i] + DiskBasedIndex.SIZES_EXTENSION;
        writeSizes &= ioFactory.exists(sizesFile);
        someSizes |= ioFactory.exists(sizesFile);

        term[i] = new MutableString();
        termReader[i] = new FastBufferedReader(new InputStreamReader(
                ioFactory.getInputStream(this.inputBasename[i] + DiskBasedIndex.TERMS_EXTENSION), "UTF-8"));
        if (termReader[i].readLine(term[i]) != null)
            termQueue.enqueue(i); // If the term list is nonempty, we enqueue it
    }

    if (haveOccurrencies != someOccurrencies)
        LOGGER.warn("Some (but not all) occurrencies files are missing");
    if (writeSizes != someSizes)
        LOGGER.warn("Some (but not all) sizes files are missing");

    additionalProperties = new Properties();
    additionalProperties.setProperty(Index.PropertyKeys.TERMPROCESSOR, ObjectParser.toSpec(termProcessor));
    if (payload != null) {
        if (indexType != IndexType.INTERLEAVED)
            throw new IllegalArgumentException("Payloads are available in interleaved indices only.");
        additionalProperties.setProperty(Index.PropertyKeys.PAYLOADCLASS, payload.getClass().getName());
        //writerFlags.put( Component.PAYLOADS, null );
    }
    additionalProperties.setProperty(Index.PropertyKeys.BATCHES, inputBasename.length);
    if (field != null)
        additionalProperties.setProperty(Index.PropertyKeys.FIELD, field);

    usedIndex = new int[numIndices];
    frequency = new long[numIndices];
    positionArray = new int[Math.max(0, maxCount)];

    numberOfDocuments = combineNumberOfDocuments();

    if ((hasCounts = writerFlags.containsKey(Component.COUNTS)) && !haveCounts)
        throw new IllegalArgumentException("Some of the indices to be combined do not have counts.");
    if ((hasPositions = writerFlags.containsKey(Component.POSITIONS)) && !havePositions)
        throw new IllegalArgumentException("Some of the indices to be combined do not have positions.");
    if ((hasPayloads = writerFlags.containsKey(Component.PAYLOADS)) && payload == null)
        throw new IllegalArgumentException("Indices to be combined do not have payloads.");
    if (indexType == IndexType.QUASI_SUCCINCT && havePositions && (!haveSumsMaxPos || !haveOccurrencies))
        throw new IllegalArgumentException(
                "Quasi-succinct indices require occurrencies and sum of maximum positions to write an index with positions.");
    if (indexType == IndexType.QUASI_SUCCINCT && haveCounts && !haveOccurrencies)
        throw new IllegalArgumentException(
                "Quasi-succinct indices require occurencies to write an index with counts.");
    if (!allDataForSizeComputation && indexType != IndexType.QUASI_SUCCINCT && hasPositions && skips
            && quantum < 0)
        throw new IllegalArgumentException(
                "Some of the indices to be combined do not have offsets or number of bits for positions (and you required variable quanta).");

    // If we have payloads or not all of the index, we are forced to use an interleaved index.
    if (hasPayloads)
        indexType = IndexType.INTERLEAVED;
    if (indexType == IndexType.HIGH_PERFORMANCE && !havePositions)
        throw new IllegalArgumentException(
                "You cannot disable positions or counts for high-performance indices.");
    // High-performance indices always have skips.
    skips |= indexType == IndexType.HIGH_PERFORMANCE;
    if (skips && (quantum == 0 || height < 0))
        throw new IllegalArgumentException("You must specify a nonzero quantum and a nonnegative height");
    // We set up variable quanta only if we have skips, we are not computing just metadata, and the quantum is negative.
    p = indexType != IndexType.QUASI_SUCCINCT && skips && !metadataOnly && quantum < 0 ? -quantum / 100.0 : 0;

    if (p != 0)
        LOGGER.debug(
                "Imposing dynamic " + Util.format(p * 100.0) + "% occupancy of variable-quantum skip lists");

    if (!metadataOnly) {
        switch (indexType) {
        case INTERLEAVED:
            if (!skips)
                indexWriter = new BitStreamIndexWriter(ioFactory, outputBasename, numberOfDocuments, true,
                        writerFlags);
            else
                indexWriter = new SkipBitStreamIndexWriter(ioFactory, outputBasename, numberOfDocuments, true,
                        skipBufferOrCacheSize, writerFlags, skips ? (quantum < 0 ? 0 : quantum) : -1,
                        skips ? height : -1);
            if (skips && quantum < 0)
                variableQuantumIndexWriter = (VariableQuantumIndexWriter) indexWriter;
            break;
        case HIGH_PERFORMANCE:
            if (ioFactory != IOFactory.FILESYSTEM_FACTORY)
                throw new IllegalArgumentException(
                        "High-performance indices currently do not support I/O factories");
            indexWriter = new BitStreamHPIndexWriter(outputBasename, numberOfDocuments, true,
                    skipBufferOrCacheSize, writerFlags, quantum < 0 ? 0 : quantum, height);
            variableQuantumIndexWriter = (VariableQuantumIndexWriter) indexWriter;
            break;
        case QUASI_SUCCINCT:
            indexWriter = quasiSuccinctIndexWriter = new QuasiSuccinctIndexWriter(ioFactory, outputBasename,
                    numberOfDocuments,
                    Fast.mostSignificantBit(quantum < 0 ? QuasiSuccinctIndex.DEFAULT_QUANTUM : quantum),
                    skipBufferOrCacheSize, writerFlags, ByteOrder.nativeOrder());
        }
    }
}

From source file:com.aimfire.gallery.cardboard.PhotoActivity.java

/**
 * Draws a frame for an eye.
 *
 * @param eye The eye to render. Includes all required transformations.
 */
@Override
public void onDrawEye(Eye eye) {
    if (mAssetInd == -1) {
        // we are still showing instruction, return without doing anything
        return;
    }

    if (!mAssetChangedLeft && !mAssetChangedRight) {
        // nothing changed, do nothing and return
        return;
    }

    if (eye.getType() == Eye.Type.LEFT)
        mAssetChangedLeft = false;
    else if (eye.getType() == Eye.Type.RIGHT)
        mAssetChangedRight = false;

    GLES20.glClear(GLES20.GL_COLOR_BUFFER_BIT | GLES20.GL_DEPTH_BUFFER_BIT);
    checkGLError("mColorParam");

    GLES20.glUseProgram(mPicProgram);

    GLES20.glUniform1f(mDimRatioParam, mDimRatio);

    GLES20.glActiveTexture(GLES20.GL_TEXTURE0);
    if (eye.getType() == Eye.Type.LEFT) {
        GLES20.glBindTexture(GLES20.GL_TEXTURE_2D, mTextureCurr[0]);
    } else {
        GLES20.glBindTexture(GLES20.GL_TEXTURE_2D, mTextureCurr[1]);
    }

    // set the zoom level
    GLES20.glUniform1f(mZoomParam, sZoom[mImgZoomInd]);

    /*
     * if user prefers negative parallax, shift window on left frame leftward and right frame
     * rightward. if user prefers positive parallax, do the opposite
     */
    if (eye.getType() == Eye.Type.LEFT) {
        GLES20.glUniform1f(mParallaxParam, mImgParallaxAdj / 2.0f);
    } else {
        GLES20.glUniform1f(mParallaxParam, -mImgParallaxAdj / 2.0f);
    }

    // Set the position of the picture
    //float zoomCoords[] = new float[picCoords.length];
    //for(int i=0; i<picCoords.length; i++)
    //zoomCoords[i] = picCoords[i] * zoom[zoomInd];

    //ByteBuffer bblVertices = ByteBuffer.allocateDirect(zoomCoords.length * 4);
    ByteBuffer bblVertices = ByteBuffer.allocateDirect(picCoords.length * 4);
    bblVertices.order(ByteOrder.nativeOrder());
    mPicVertices = bblVertices.asFloatBuffer();
    //mPicVertices.put(zoomCoords);
    mPicVertices.put(picCoords);
    mPicVertices.position(0);

    GLES20.glVertexAttribPointer(mPicPositionParam, COORDS_PER_VERTEX, GLES20.GL_FLOAT, false, vertexStride,
            mPicVertices);

    GLES20.glDrawElements(GLES20.GL_TRIANGLES, /* mode */
            6, /* count */
            GLES20.GL_UNSIGNED_SHORT, /* type */
            mPicElements /* element array buffer offset */
    );
}
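
OpenGL ES expects client-side vertex data in a direct buffer using the platform's native byte order, which is why the method above wraps picCoords this way. The same pattern factored into a reusable helper (a minimal sketch; the class and method names are ours), so the buffer can be built once instead of on every draw:

import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.nio.FloatBuffer;

final class GlBuffers {
    // Wrap vertex coordinates in a direct, native-order FloatBuffer suitable for
    // GLES20.glVertexAttribPointer(); build it once and rewind before each draw.
    static FloatBuffer asVertexBuffer(float[] coords) {
        FloatBuffer fb = ByteBuffer.allocateDirect(coords.length * 4) // 4 bytes per float
                .order(ByteOrder.nativeOrder()).asFloatBuffer();
        fb.put(coords);
        fb.position(0);
        return fb;
    }
}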

From source file:edu.harvard.iq.dataverse.ingest.tabulardata.impl.plugins.sav.SAVFileReader.java

void decodeRecordType1(BufferedInputStream stream) throws IOException {
    dbgLog.fine("***** decodeRecordType1(): start *****");

    if (stream == null) {
        throw new IllegalArgumentException("stream == null!");
    }
    // how to read each recordType
    // 1. set-up the following objects before reading bytes
    // a. the working byte array
    // b. the storage object
    // the length of this field: 172 bytes = 60 + 4 + 12 + 4 + 8 + 84
    // this field consists of 6 distinct blocks

    byte[] recordType1 = new byte[LENGTH_RECORDTYPE1];
    // int caseWeightVariableOBSIndex = 0; 

    try {
        int nbytes = stream.read(recordType1, 0, LENGTH_RECORDTYPE1);

        //printHexDump(recordType1, "recordType1");

        if (nbytes == 0) {
            throw new IOException("reading recordType1: no byte was read");
        }

        // 1.1 60 byte-String that tells the platform/version of SPSS that
        // wrote this file

        int offset_start = 0;
        int offset_end = LENGTH_SPSS_PRODUCT_INFO; // 60 bytes

        String productInfo = new String(Arrays.copyOfRange(recordType1, offset_start, offset_end), "US-ASCII");

        dbgLog.fine("productInfo:\n" + productInfo + "\n");
        dataTable.setOriginalFormatVersion(productInfo);

        // try to parse out the SPSS version that created this data
        // file: 

        String spssVersionTag = null;

        String regexpVersionNumber = ".*Release ([0-9]*)";
        Pattern versionTagPattern = Pattern.compile(regexpVersionNumber);
        Matcher matcher = versionTagPattern.matcher(productInfo);
        if (matcher.find()) {
            spssVersionTag = matcher.group(1);
            dbgLog.fine("SPSS Version Number: " + spssVersionTag);
        }

        // TODO: 
        // try a more elaborate regex (like the one for the "new-style" 
        // productInfo line, below), to select the version number, the 
        // minor version number and the platform (windows vs. mac) separately. 
        // would be cleaner to save just that, rather than the entire 
        // productInfo tag. 
        // -- L.A. 4.0 beta

        if (spssVersionTag == null || spssVersionTag.equals("")) {
            // Later versions of SPSS have different formatting of the
            // productInfo line:
            regexpVersionNumber = ".* IBM SPSS STATISTICS.* ([^ ]*) ([0-9][0-9]*)([^ ]*)";
            versionTagPattern = Pattern.compile(regexpVersionNumber);
            matcher = versionTagPattern.matcher(productInfo);
            if (matcher.find()) {
                String spssPlatformTag = matcher.group(1);
                spssVersionTag = matcher.group(2);
                String spssVersionTagMinor = matcher.group(3);

                dbgLog.fine("SPSS Version Number (new style): " + spssVersionTag);
                dbgLog.fine("SPSS Version/Platform Identification (new style:) " + spssPlatformTag + " "
                        + spssVersionTag + spssVersionTagMinor);
                dataTable
                        .setOriginalFormatVersion(spssVersionTag + spssVersionTagMinor + " " + spssPlatformTag);

            }
        }

        if (spssVersionTag != null && !spssVersionTag.equals("")) {
            spssVersionNumber = Integer.valueOf(spssVersionTag).intValue();

            /*
             *  Starting with SPSS version 16, the default encoding is 
             *  UTF-8. 
             *  But we are only going to use it if the user did not explicitly
             *  specify the encoding on the addfiles page. Then we'd want 
             *  to stick with whatever they entered. 
             *  (also, it appears that (starting with the same version 16?)
             *  it is actually possible to define the locale/character set
             *  in the file - section 7, sub-type 20; TODO: decide which 
             *  one takes precedence, if we have the encoding defined both
             *  in the file and through the UI. -- L.A. 4.0 beta)
             */
            if (spssVersionNumber > 15) {
                if (getDataLanguageEncoding() == null) {
                    //defaultCharSet = "windows-1252"; // temporary! -- L.A. "UTF-8";
                    defaultCharSet = "UTF-8";
                }
            }
        }

        // TODO: 
        // decide if we want to save the [determined/guessed] character set
        // somewhere in the dataset object. 
        // this may be relevant in cases when accented/non-latin characters
        // get ingested incorrectly; 
        // -- L.A. 4.0 beta

        // 1.2) 4-byte file-layout-code (byte-order)

        offset_start = offset_end;
        offset_end += LENGTH_FILE_LAYOUT_CODE; // 4 byte

        ByteBuffer bb_fileLayout_code = ByteBuffer.wrap(recordType1, offset_start, LENGTH_FILE_LAYOUT_CODE);

        ByteBuffer byteOderTest = bb_fileLayout_code.duplicate();
        // interpret the 4 bytes as an int

        int int2test = byteOderTest.getInt();

        if (int2test == 2 || int2test == 3) {
            dbgLog.fine("integer == " + int2test + ": the byte-oder of the writer is the same "
                    + "as the counterpart of Java: Big Endian");
        } else {
            // Because Java's default byte order is big endian,
            // a mismatch here means this sav file was written on a little-endian machine:
            // non-string, multi-byte blocks must be byte-reversed

            bb_fileLayout_code.order(ByteOrder.LITTLE_ENDIAN);

            int2test = bb_fileLayout_code.getInt();

            if (int2test == 2 || int2test == 3) {
                dbgLog.fine("The sav file was saved on a little endian machine");
                dbgLog.fine("Reveral of the bytes is necessary to decode " + "multi-byte, non-string blocks");

                isLittleEndian = true;

            } else {
                throw new IOException("reading recordType1:unknown file layout code=" + int2test);
            }
        }

        dbgLog.fine("Endian of this platform:" + ByteOrder.nativeOrder().toString());

        // 1.3 4-byte Number_Of_OBS_Units_Per_Case 
        // (= how many RT2 records => how many variables)

        offset_start = offset_end;
        offset_end += LENGTH_NUMBER_OF_OBS_UNITS_PER_CASE; // 4 byte

        ByteBuffer bb_OBS_units_per_case = ByteBuffer.wrap(recordType1, offset_start,
                LENGTH_NUMBER_OF_OBS_UNITS_PER_CASE);

        if (isLittleEndian) {
            bb_OBS_units_per_case.order(ByteOrder.LITTLE_ENDIAN);
        }

        OBSUnitsPerCase = bb_OBS_units_per_case.getInt();

        dbgLog.fine("RT1: OBSUnitsPerCase=" + OBSUnitsPerCase);

        // 1.4 4-byte Compression_Switch

        offset_start = offset_end;
        offset_end += LENGTH_COMPRESSION_SWITCH; // 4 byte

        ByteBuffer bb_compression_switch = ByteBuffer.wrap(recordType1, offset_start,
                LENGTH_COMPRESSION_SWITCH);

        if (isLittleEndian) {
            bb_compression_switch.order(ByteOrder.LITTLE_ENDIAN);
        }

        int compression_switch = bb_compression_switch.getInt();
        if (compression_switch == 0) {
            // data section is not compressed
            isDataSectionCompressed = false;
            dbgLog.fine("data section is not compressed");
        } else {
            dbgLog.fine("data section is compressed:" + compression_switch);
        }

        // 1.5 4-byte Case-Weight Variable Index
        // warning: this variable index starts from 1, not 0

        offset_start = offset_end;
        offset_end += LENGTH_CASE_WEIGHT_VARIABLE_INDEX; // 4 byte

        ByteBuffer bb_Case_Weight_Variable_Index = ByteBuffer.wrap(recordType1, offset_start,
                LENGTH_CASE_WEIGHT_VARIABLE_INDEX);

        if (isLittleEndian) {
            bb_Case_Weight_Variable_Index.order(ByteOrder.LITTLE_ENDIAN);
        }

        caseWeightVariableOBSIndex = bb_Case_Weight_Variable_Index.getInt();

        /// caseWeightVariableOBSIndex will be used later on to locate 
        /// the weight variable; so we'll be able to mark the corresponding
        /// variables properly. 
        // TODO: make sure case weight variables are properly handled! 
        // -- L.A. 4.0 beta
        ///smd.getFileInformation().put("caseWeightVariableOBSIndex", caseWeightVariableOBSIndex);

        // 1.6 4-byte Number of Cases

        offset_start = offset_end;
        offset_end += LENGTH_NUMBER_OF_CASES; // 4 byte

        ByteBuffer bb_Number_Of_Cases = ByteBuffer.wrap(recordType1, offset_start, LENGTH_NUMBER_OF_CASES);

        if (isLittleEndian) {
            bb_Number_Of_Cases.order(ByteOrder.LITTLE_ENDIAN);
        }

        int numberOfCases = bb_Number_Of_Cases.getInt();

        if (numberOfCases < 0) {
            // -1 if numberOfCases is unknown
            throw new RuntimeException("number of cases is not recorded in the header");
        } else {
            dbgLog.fine("RT1: number of cases is recorded= " + numberOfCases);
            dataTable.setCaseQuantity(new Long(numberOfCases));
            ///caseQnty = numberOfCases;
            ///smd.getFileInformation().put("caseQnty", numberOfCases);
        }

        // 1.7 8-byte compression-bias [not long but double]

        offset_start = offset_end;
        offset_end += LENGTH_COMPRESSION_BIAS; // 8 byte

        ByteBuffer bb_compression_bias = ByteBuffer
                .wrap(Arrays.copyOfRange(recordType1, offset_start, offset_end));

        if (isLittleEndian) {
            bb_compression_bias.order(ByteOrder.LITTLE_ENDIAN);
        }

        Double compressionBias = bb_compression_bias.getDouble();

        // TODO: 
        // check if this "compression bias" is being used anywhere? 
        // doesn't seem to be!
        // -- 4.0 alpha
        if (compressionBias == 100d) {
            // 100 is expected
            dbgLog.fine("compressionBias is 100 as expected");
            ///smd.getFileInformation().put("compressionBias", 100);
        } else {
            dbgLog.fine("compression bias is not 100: " + compressionBias);
            ///smd.getFileInformation().put("compressionBias", compressionBias);
        }

        // 1.8 84-byte File Creation Information (date/time: dd MM yy hh:mm:ss +
        // 64-byte label)

        offset_start = offset_end;
        offset_end += LENGTH_FILE_CREATION_INFO; // 84 bytes

        String fileCreationInfo = getNullStrippedString(
                new String(Arrays.copyOfRange(recordType1, offset_start, offset_end), "US-ASCII"));

        dbgLog.fine("fileCreationInfo:\n" + fileCreationInfo + "\n");

        String fileCreationDate = fileCreationInfo.substring(0, length_file_creation_date);
        int dateEnd = length_file_creation_date + length_file_creation_time;
        String fileCreationTime = fileCreationInfo.substring(length_file_creation_date, (dateEnd));
        String fileCreationNote = fileCreationInfo.substring(dateEnd, length_file_creation_label);

        dbgLog.fine("fileDate=" + fileCreationDate);
        dbgLog.fine("fileTime=" + fileCreationTime);
        dbgLog.fine("fileNote" + fileCreationNote);

    } catch (IOException ex) {
        throw ex;
    }

    dbgLog.fine("decodeRecordType1(): end");
}

From source file:org.bimserver.GeometryGenerator.java

private void setTransformationMatrix(GeometryInfo geometryInfo, float[] transformationMatrix) {
    ByteBuffer byteBuffer = ByteBuffer.allocate(16 * 4);
    byteBuffer.order(ByteOrder.nativeOrder());
    FloatBuffer asFloatBuffer = byteBuffer.asFloatBuffer();
    for (float f : transformationMatrix) {
        asFloatBuffer.put(f);
    }
    geometryInfo.setTransformation(byteBuffer.array());
}

From source file:haven.Utils.java

public static ByteBuffer mkbbuf(int n) {
    try {
        return (ByteBuffer.allocateDirect(n).order(ByteOrder.nativeOrder()));
    } catch (OutOfMemoryError e) {
        /* At least Sun's class library doesn't try to collect
         * garbage if it's out of direct memory, which is pretty
         * stupid. So do it for it, then. */
        System.gc();
        return (ByteBuffer.allocateDirect(n).order(ByteOrder.nativeOrder()));
    }
}
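
Direct buffers live outside the Java heap and are only reclaimed when their owning ByteBuffer objects are garbage-collected, so an OutOfMemoryError can occur here even while plenty of heap is free; forcing a collection and retrying, as this helper does, is a common workaround.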

From source file:org.bimserver.geometry.StreamingGeometryGenerator.java

void setTransformationMatrix(VirtualObject geometryInfo, double[] transformationMatrix)
        throws BimserverDatabaseException {
    ByteBuffer byteBuffer = ByteBuffer.allocate(16 * 8);
    byteBuffer.order(ByteOrder.nativeOrder());
    DoubleBuffer asDoubleBuffer = byteBuffer.asDoubleBuffer();
    for (double d : transformationMatrix) {
        asDoubleBuffer.put(d);
    }
    geometryInfo.setAttribute(GeometryPackage.eINSTANCE.getGeometryInfo_Transformation(), byteBuffer.array());
}
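
Because the matrix is written in nativeOrder(), the resulting byte[] is only portable between machines with the same endianness. A reader on the same platform can recover it with the mirror-image calls (a minimal sketch; the class and method names are ours):

import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.nio.DoubleBuffer;

final class MatrixCodec {
    // Decode a 4x4 transformation matrix stored as 16 doubles in native byte order
    // (the layout produced by setTransformationMatrix above).
    static double[] toDoubleArray(byte[] bytes) {
        DoubleBuffer db = ByteBuffer.wrap(bytes).order(ByteOrder.nativeOrder()).asDoubleBuffer();
        double[] matrix = new double[db.remaining()]; // 16 for a 4x4 matrix
        db.get(matrix);
        return matrix;
    }
}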

From source file:org.nd4j.linalg.Nd4jTestsC.java

@Test
public void testNullPointerDataBuffer() {
    DataBuffer.Type initialType = Nd4j.dataType();

    DataTypeUtil.setDTypeForContext(DataBuffer.Type.FLOAT);

    ByteBuffer allocate = ByteBuffer.allocateDirect(10 * 4).order(ByteOrder.nativeOrder());
    allocate.asFloatBuffer().put(new float[] { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 });
    DataBuffer buff = Nd4j.createBuffer(allocate, DataBuffer.Type.FLOAT, 10);
    float sum = Nd4j.create(buff).sumNumber().floatValue();
    System.out.println(sum);
    assertEquals(55f, sum, 0.001f);

    DataTypeUtil.setDTypeForContext(initialType);
}