Example usage for java.nio ByteBuffer rewind

List of usage examples for java.nio ByteBuffer rewind

Introduction

On this page you can find usage examples for java.nio.ByteBuffer.rewind(), collected from open-source projects.

Prototype

public final Buffer rewind() 

Document

Rewinds this buffer. The position is set to zero and the mark is discarded. Invoke this method before a sequence of channel-write or get operations, assuming that the limit has already been set appropriately.
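
To make the behavior concrete, here is a minimal, self-contained sketch; the class and variable names are illustrative only, not taken from the projects below:

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

public class RewindDemo {
    public static void main(String[] args) {
        ByteBuffer buf = ByteBuffer.allocate(16);
        buf.put("hello".getBytes(StandardCharsets.US_ASCII));

        // flip() sets the limit to the current position (5) and the position to 0
        buf.flip();
        byte[] once = new byte[buf.remaining()];
        buf.get(once); // relative gets advance the position to the limit

        // rewind() resets the position to 0 and discards the mark,
        // but leaves the limit alone, so the same bytes can be read again
        buf.rewind();
        byte[] twice = new byte[buf.remaining()];
        buf.get(twice);

        System.out.println(new String(twice, StandardCharsets.US_ASCII)); // prints: hello
    }
}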

Usage

From source file:org.apache.hadoop.raid.JRSDecoder.java
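
In this Hadoop RAID decoder, the sequence number and an importance flag are written with absolute putInt/put calls, which leave the position untouched; buf.rewind() then ensures the position is at zero both before readFromInputs fills the packet and before the buffer is handed to a decode thread.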

void writeFixedBlock(FSDataInputStream[] inputs, int[] erasedLocations,
        Map<Integer, LocatedBlock> corruptStripe, File[] lbfs, long[] limits, JRSStreamFactory sf)
        throws IOException {

    long limit = 0;

    for (int i = 0; i < limits.length; i++)
        if (limit < limits[i])
            limit = limits[i];

    // Loop while the number of skipped + read bytes is less than the max.
    int seq = 0;

    for (long read = 0; read < limit;) {

        int failNum = erasedLocations.length;
        int bufOffset = bufSize * stripeSize;
        ByteBuffer buf = ByteBuffer.allocate(bufOffset + 64);
        buf.putInt(bufOffset, seq);

        boolean important = false;

        // mark this packet as important if it is among the last threadNum packets
        if ((limit - read + bufSize - 1) / bufSize <= threadNum) {
            important = true;
            buf.put(bufOffset + 4, (byte) 1);
        } else {
            buf.put(bufOffset + 4, (byte) 0);
        }
        LOG.info("anchor Decode_stripe " + seq + " Data_reading " + System.nanoTime());
        //read packets
        buf.rewind();
        erasedLocations = readFromInputs(inputs, erasedLocations, buf, sf, seq);
        LOG.info("anchor Decode_stripe " + seq + " Data_read " + System.nanoTime());

        int toRead = (int) Math.min((long) bufSize, limit - read);

        buf.rewind();

        //finding the best ring buffer
        int remain = -1;
        int chosen = -1;
        for (int i = 0; i < threadNum; i++) {
            int rc = q[i].remainingCapacity();
            if (remain < rc) {
                remain = rc;
                chosen = i;
            }
        }
        if (important) {
            chosen = (((int) (limit - read) + bufSize - 1) / bufSize - 1) % threadNum;
        }

        DecodePackage dp = (new DecodePackage(erasedLocations, buf)).limits(limits).localFiles(lbfs);

        //dispatch
        boolean interrupted = false;
        boolean flag = true;
        while (flag) {
            flag = false;
            try {
                q[chosen].put(dp);
            } catch (InterruptedException e) {
                // remember the interrupt instead of swallowing it
                interrupted = true;
                flag = true;
            }
        }
        if (interrupted) {
            Thread.currentThread().interrupt();
        }
        LOG.info("anchor Decode_stripe " + seq + " Data_pushed " + System.nanoTime());

        seq++;
        read += toRead;
    }

    //waiting for the end of the decode
    for (int i = 0; i < threadNum; i++) {
        boolean interrupted = false;
        boolean flag = true;
        while (flag) {
            flag = false;
            try {
                p[i].take();
            } catch (InterruptedException e) {
                // remember the interrupt instead of swallowing it
                interrupted = true;
                flag = true;
            }
        }
        if (interrupted) {
            Thread.currentThread().interrupt();
        }

    }

}

From source file:org.apache.hadoop.raid.PMDecoder.java
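
The same rewind-before-read, rewind-before-dispatch pattern as above, this time decoding into a caller-supplied output buffer.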

void writeFixedBlock(FSDataInputStream[] inputs, int[] erasedLocations, int[] validErasedLocations, long limit,
        byte[] outBuf, PMStreamFactory sf) throws IOException {

    int seq = 0;

    for (long read = 0; read < limit;) {

        int failNum = validErasedLocations.length;
        int bufOffset = encodedBufSize * (stripeSize + paritySize - failNum);
        ByteBuffer buf = ByteBuffer.allocate(bufOffset + 64);
        buf.putInt(bufOffset, seq);

        boolean important = false;

        // mark this packet as important if it is among the last threadNum packets
        if ((limit - read + bufSize - 1) / bufSize <= threadNum) {
            important = true;
            buf.put(bufOffset + 4, (byte) 1);
        } else {
            buf.put(bufOffset + 4, (byte) 0);
        }
        LOG.info("anchor Decode_stripe " + seq + " Data_reading " + System.nanoTime());
        //read packets
        buf.rewind();
        validErasedLocations = readFromInputs(inputs, validErasedLocations, buf, sf, seq);
        LOG.info("anchor Decode_stripe " + seq + " Data_read " + System.nanoTime());
        buf.rewind();

        int toRead = (int) Math.min((long) bufSize, limit - read);

        //finding the best ring buffer
        int remain = -1;
        int chosen = -1;
        for (int i = 0; i < threadNum; i++) {
            int rc = q[i].remainingCapacity();
            if (remain < rc) {
                remain = rc;
                chosen = i;
            }
        }
        if (important) {
            chosen = (int) (((limit - read + bufSize - 1) / bufSize - 1) % threadNum);
        }

        DecodePackage dp = (new DecodePackage(erasedLocations, validErasedLocations, buf)).limit(limit)
                .outputBuffer(outBuf);
        //dispatch
        boolean interrupted = false;
        boolean flag = true;
        while (flag) {
            flag = false;
            try {
                q[chosen].put(dp);
            } catch (InterruptedException e) {
                // restoring the interrupt flag inside the loop would make the
                // retried put() throw again immediately; restore it afterwards
                interrupted = true;
                flag = true;
            }
        }
        if (interrupted) {
            Thread.currentThread().interrupt();
        }
        LOG.info("anchor Decode_stripe " + seq + " Data_pushed " + System.nanoTime());

        seq++;
        read += toRead;
    }

    //waiting for the end of the decode
    for (int i = 0; i < threadNum; i++) {
        boolean interrupted = false;
        boolean flag = true;
        while (flag) {
            flag = false;
            try {
                p[i].take();
            } catch (InterruptedException e) {
                // restore the interrupt only after take() has succeeded
                interrupted = true;
                flag = true;
            }
        }
        if (interrupted) {
            Thread.currentThread().interrupt();
        }
    }
}

From source file:com.android.camera.one.v2.OneCameraZslImpl.java
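
After draining a JPEG plane's buffer into a byte array with a relative bulk get, the buffer is rewound so the Image's data stays readable for any later consumer.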

/**
 * Given an image reader, extracts the JPEG image bytes and then closes the
 * reader.
 *
 * @param img the image from which to extract jpeg bytes or compress to
 *            jpeg.
 * @param degrees the angle to rotate the image clockwise, in degrees. Rotation is
 *            only applied to YUV images.
 * @return The bytes of the JPEG image. Newly allocated.
 */
private byte[] acquireJpegBytes(Image img, int degrees) {
    ByteBuffer buffer;

    if (img.getFormat() == ImageFormat.JPEG) {
        Image.Plane plane0 = img.getPlanes()[0];
        buffer = plane0.getBuffer();

        byte[] imageBytes = new byte[buffer.remaining()];
        buffer.get(imageBytes);
        buffer.rewind();
        return imageBytes;
    } else if (img.getFormat() == ImageFormat.YUV_420_888) {
        buffer = mJpegByteBufferPool.acquire();
        if (buffer == null) {
            buffer = ByteBuffer.allocateDirect(img.getWidth() * img.getHeight() * 3);
        }

        int numBytes = JpegUtilNative.compressJpegFromYUV420Image(new AndroidImageProxy(img), buffer,
                JPEG_QUALITY, degrees);

        if (numBytes < 0) {
            throw new RuntimeException("Error compressing jpeg.");
        }

        buffer.limit(numBytes);

        byte[] imageBytes = new byte[buffer.remaining()];
        buffer.get(imageBytes);

        buffer.clear();
        mJpegByteBufferPool.release(buffer);

        return imageBytes;
    } else {
        throw new RuntimeException("Unsupported image format.");
    }
}

From source file:org.apache.hadoop.raid.PMDecoder.java
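
Another variant of the dispatch loop: the buffer is rewound after the absolute header writes and again after readFromInputs, so both the reader and the decode threads see it from position zero.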

void writeFixedBlock(FSDataInputStream[] inputs, int[] erasedLocations, int[] validErasedLocations,
        Map<Integer, LocatedBlock> corruptStripe, File[] lbfs, long[] limits, PMStreamFactory sf)
        throws IOException {

    long limit = 0;

    for (int i = 0; i < limits.length; i++)
        if (limit < limits[i])
            limit = limits[i];

    int seq = 0;

    for (long read = 0; read < limit;) {

        int failNum = validErasedLocations.length;
        int bufOffset = encodedBufSize * (stripeSize + paritySize - failNum);
        ByteBuffer buf = ByteBuffer.allocate(bufOffset + 64);
        buf.putInt(bufOffset, seq);
        //LOG.info("bufOffset: "+bufOffset+"encodedBufSize: "+encodedBufSize);

        boolean important = false;

        // mark this packet as important if it is among the last threadNum packets
        if ((limit - read + bufSize - 1) / bufSize <= threadNum) {
            important = true;
            buf.put(bufOffset + 4, (byte) 1);
        } else {
            buf.put(bufOffset + 4, (byte) 0);
        }
        LOG.info("anchor Decode_stripe " + seq + " Data_reading " + System.nanoTime());
        //read packets
        buf.rewind();
        validErasedLocations = readFromInputs(inputs, validErasedLocations, buf, sf, seq);
        LOG.info("anchor Decode_stripe " + seq + " Data_read " + System.nanoTime());

        int toRead = (int) Math.min((long) bufSize, limit - read);

        buf.rewind();

        //finding the best ring buffer
        int remain = -1;
        int chosen = -1;
        for (int i = 0; i < threadNum; i++) {
            int rc = q[i].remainingCapacity();
            if (remain < rc) {
                remain = rc;
                chosen = i;
            }
        }
        if (important) {
            chosen = (int) ((((limit - read) + bufSize - 1) / bufSize - 1) % threadNum);
        }

        DecodePackage dp = (new DecodePackage(erasedLocations, validErasedLocations, buf)).limits(limits)
                .localFiles(lbfs);
        //dispatch
        boolean interrupted = false;
        boolean flag = true;
        while (flag) {
            flag = false;
            try {
                q[chosen].put(dp);
            } catch (InterruptedException e) {
                // restoring the interrupt flag inside the loop would make the
                // retried put() throw again immediately; restore it afterwards
                interrupted = true;
                flag = true;
            }
        }
        if (interrupted) {
            Thread.currentThread().interrupt();
        }
        LOG.info("anchor Decode_stripe " + seq + " Data_pushed " + System.nanoTime());

        seq++;
        read += toRead;
    }

    //waiting for the end of the decode
    for (int i = 0; i < threadNum; i++) {
        boolean interrupted = false;
        boolean flag = true;
        while (flag) {
            flag = false;
            try {
                p[i].take();
            } catch (InterruptedException e) {
                // restore the interrupt only after take() has succeeded
                interrupted = true;
                flag = true;
            }
        }
        if (interrupted) {
            Thread.currentThread().interrupt();
        }

    }
}

From source file:edu.northwestern.jcr.adapter.fedora.persistence.FedoraConnectorREST.java
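
Channel reads advance the buffer's position while the datastream is accumulated; rewind() then moves the position back to zero so the collected bytes can be copied out with a bulk get.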

/**
 * Wrapper of getDatastreamDissemination in REST.
 *
 * @param pid pid of the object
 * @param dsID id of the datastream
 * @return byte content of the data stream
 */
public byte[] getDataStream(String pid, String dsID) {
    HttpInputStream inputStream;
    ReadableByteChannel channel;
    ByteBuffer buf;
    byte[] bytes;
    int numRead = 0;
    int length = 0;

    try {
        inputStream = fc.get(
                String.format("/objects/%s/datastreams/%s/content", URLEncoder.encode(pid, "UTF-8"), dsID),
                true, false);
    } catch (Exception e) {
        return null;
    }

    channel = Channels.newChannel(inputStream);
    // Create a direct ByteBuffer; note that this code assumes the whole
    // datastream fits in the 10 MB buffer
    buf = ByteBuffer.allocateDirect(10 * 1024 * 1024);

    while (numRead >= 0) {
        // Read bytes from the channel
        try {
            numRead = channel.read(buf);
        } catch (Exception e) {
            return null;
        }

        if (numRead > 0) {
            length += numRead;
        }
    }

    bytes = new byte[length];
    // reset the position of the buffer to zero
    buf.rewind();
    buf.get(bytes);

    return bytes;
}

From source file:org.apache.hadoop.raid.IADecoder.java
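
Here the packet buffer comes back from readFromInputs with its position advanced; rewind() resets it before the buffer is packaged and pushed to a decode thread.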

void writeFixedBlock(FSDataInputStream[] inputs, int[] erasedLocations, int[] validErasedLocations,
        Map<Integer, LocatedBlock> corruptStripe, File[] lbfs, long[] limits, IAStreamFactory sf)
        throws IOException {

    long limit = 0;

    for (int i = 0; i < limits.length; i++)
        if (limit < limits[i])
            limit = limits[i];

    int seq = 0;

    for (long read = 0; read < limit;) {

        boolean important = false;

        LOG.info("anchor Decode_stripe " + seq + " Data_reading " + System.nanoTime());
        //read packets
        ReadPackage rp = readFromInputs(inputs, validErasedLocations, sf, seq);
        ByteBuffer buf = rp.buf;
        validErasedLocations = rp.validErasedLocations;
        int bufOffset = encodedBufSize * (stripeSize + paritySize - validErasedLocations.length);
        LOG.info("anchor Decode_stripe " + seq + " Data_read " + System.nanoTime());

        // mark this packet as important if it is among the last threadNum packets
        if ((limit - read + bufSize - 1) / bufSize <= threadNum) {
            important = true;
            buf.put(bufOffset + 4, (byte) 1);
        } else {
            buf.put(bufOffset + 4, (byte) 0);
        }

        int toRead = (int) Math.min((long) bufSize, limit - read);

        buf.rewind();

        //finding the best ring buffer
        int remain = -1;
        int chosen = -1;
        for (int i = 0; i < threadNum; i++) {
            int rc = q[i].remainingCapacity();
            if (remain < rc) {
                remain = rc;
                chosen = i;
            }
        }
        if (important) {
            chosen = (int) ((((limit - read) + bufSize - 1) / bufSize - 1) % threadNum);
        }

        DecodePackage dp = (new DecodePackage(erasedLocations, validErasedLocations, buf)).limits(limits)
                .localFiles(lbfs);
        //dispatch
        boolean interrupted = false;
        boolean flag = true;
        while (flag) {
            flag = false;
            try {
                q[chosen].put(dp);
            } catch (InterruptedException e) {
                // restoring the interrupt flag inside the loop would make the
                // retried put() throw again immediately; restore it afterwards
                interrupted = true;
                flag = true;
            }
        }
        if (interrupted) {
            Thread.currentThread().interrupt();
        }
        LOG.info("anchor Decode_stripe " + seq + " Data_pushed " + System.nanoTime());

        seq++;
        read += toRead;
    }

    //waiting for the end of the decode
    for (int i = 0; i < threadNum; i++) {
        boolean interrupted = false;
        boolean flag = true;
        while (flag) {
            flag = false;
            try {
                p[i].take();
            } catch (InterruptedException e) {
                // restore the interrupt only after take() has succeeded
                interrupted = true;
                flag = true;
            }
        }
        if (interrupted) {
            Thread.currentThread().interrupt();
        }

    }
}

From source file:org.apache.synapse.transport.pipe.PipeEndpointListener.java
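
Each channel read advances readBuffer's position; once the accumulated bytes have been handed to the protocol decoder via readBuffer.array(), rewind() resets the position so the next read overwrites the buffer from the start.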

public void run() {
    running = true;
    ByteBuffer readBuffer = ByteBuffer.allocate(1024);
    try {
        while (true) {
            ProtocolDecoder decoder;
            decoder = endpoint.getProtocol().createProtocolDecoder();
            while (true) {
                while (decoder.inputRequired()) {
                    int c;
                    try {
                        c = readChannel.read(readBuffer);
                    } catch (IOException ex) {
                        log.error("Error while reading from pipe " + endpoint.getPipe().getAbsolutePath()
                                + "; shutting down listener", ex);
                        return;
                    }
                    if (c == -1) {
                        log.error("Pipe " + endpoint.getPipe().getAbsolutePath()
                                + " was unexpectedly closed; shutting down listener");
                        return;
                    }
                    synchronized (guard) {
                        if (!running) {
                            return;
                        }
                    }
                    decoder.decode(readBuffer.array(), 0, readBuffer.position());
                    readBuffer.rewind();
                }
                byte[] message = decoder.getNext();
                callback.receive(endpoint, message, message.length, null);
            }
        }
    } finally {
        try {
            pipe.close();
            if (log.isDebugEnabled()) {
                log.debug("Pipe " + endpoint.getPipe().getAbsolutePath() + " closed");
            }
        } catch (IOException ex) {
            log.warn("Error while closing pipe " + endpoint.getPipe().getAbsolutePath(), ex);
        }
        done.countDown();
    }
}

From source file:org.bimserver.collada.ColladaSerializer.java
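
Both wrapped buffers are fully consumed with relative gets (to swap the triangle winding and to track the bounding extremes) and then rewound; the positions buffer in particular is read a second time when the scalars are serialized to strings.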

private void setGeometry(PrintWriter out, IfcProduct ifcProductObject, String material)
        throws RenderEngineException, SerializerException {
    // Mostly just skips IfcOpeningElements which one would probably not want to end up in the Collada file.
    if (ifcProductObject instanceof IfcFeatureElementSubtraction)
        return;
    //
    GeometryInfo geometryInfo = ifcProductObject.getGeometry();
    if (geometryInfo != null && geometryInfo.getTransformation() != null) {
        GeometryData geometryData = geometryInfo.getData();
        ByteBuffer indicesBuffer = ByteBuffer.wrap(geometryData.getIndices());
        indicesBuffer.order(ByteOrder.LITTLE_ENDIAN);
        // TODO: In Blender (3d modeling tool) and Three.js, normals are ignored in favor of vertex order. The incoming geometry seems to be in order 0 1 2 when it needs to be in 1 0 2. Need more test cases.
        // Failing order: (0, 1050, 2800), (0, 1050, 3100), (3580, 1050, 3100)
        // Successful order: (0, 1050, 3100), (0, 1050, 2800), (3580, 1050, 3100)
        List<Integer> list = new ArrayList<Integer>();
        while (indicesBuffer.hasRemaining())
            list.add(indicesBuffer.getInt());
        indicesBuffer.rewind();
        for (int i = 0; i < list.size(); i += 3) {
            Integer first = list.get(i);
            Integer next = list.get(i + 1);
            list.set(i, next);
            list.set(i + 1, first);
        }
        // Position scalars: the X, Y, and Z components of each (X, Y, Z) vertex.
        ByteBuffer positionsBuffer = ByteBuffer.wrap(geometryData.getVertices());
        positionsBuffer.order(ByteOrder.LITTLE_ENDIAN);
        // Do pass to find highest Z for considered objects.
        while (positionsBuffer.hasRemaining()) {
            float x = positionsBuffer.getFloat();
            float y = positionsBuffer.getFloat();
            float z = positionsBuffer.getFloat();
            // X
            if (x > highestObserved.x())
                highestObserved.x(x);
            else if (x < lowestObserved.x())
                lowestObserved.x(x);
            // Y
            if (y > highestObserved.y())
                highestObserved.y(y);
            else if (y < lowestObserved.y())
                lowestObserved.y(y);
            // Z
            if (z > highestObserved.z())
                highestObserved.z(z);
            else if (z < lowestObserved.z())
                lowestObserved.z(z);
        }
        positionsBuffer.rewind();
        //
        ByteBuffer normalsBuffer = ByteBuffer.wrap(geometryData.getNormals());
        normalsBuffer.order(ByteOrder.LITTLE_ENDIAN);
        // Create a geometry identification number in the form of: geom-320450
        long oid = ifcProductObject.getOid();
        String id = String.format("geom-%d", oid);
        // If the material doesn't exist in the converted map, add it.
        if (!converted.containsKey(material))
            converted.put(material, new HashSet<IfcProduct>());
        // Add the current IfcProduct to the appropriate entry in the material map.
        converted.get(material).add(ifcProductObject);
        // Name for geometry.
        String name = (ifcProductObject.getGlobalId() == null) ? "[NO_GUID]" : ifcProductObject.getGlobalId();
        // Counts.
        int vertexComponentsTotal = positionsBuffer.capacity() / 4,
                normalComponentsTotal = normalsBuffer.capacity() / 4;
        int verticesCount = positionsBuffer.capacity() / 12, normalsCount = normalsBuffer.capacity() / 12,
                triangleCount = indicesBuffer.capacity() / 12;
        // Vertex scalars as one long string: 4.05 2 1 55.0 34.01 2
        String stringPositionScalars = byteBufferToFloatingPointSpaceDelimitedString(positionsBuffer);
        // Normal scalars as one long string: 4.05 2 1 55.0 34.01 2
        String stringNormalScalars = byteBufferToFloatingPointSpaceDelimitedString(normalsBuffer); //doubleBufferToFloatingPointSpaceDelimitedString(flippedNormalsBuffer);
        // Vertex indices as one long string: 1 0 2 0 3 2 5 4 6
        String stringIndexScalars = listToSpaceDelimitedString(list, intFormat);
        // Write geometry block for this IfcProduct (i.e. IfcRoof, IfcSlab, etc).
        out.println(" <geometry id=\"" + id + "\" name=\"" + name + "\">");
        out.println("  <mesh>");
        out.println("   <source id=\"positions-" + oid + "\" name=\"positions-" + oid + "\">");
        out.println("    <float_array id=\"positions-array-" + oid + "\" count=\"" + vertexComponentsTotal
                + "\">" + stringPositionScalars + "</float_array>");
        out.println("    <technique_common>");
        out.println("     <accessor count=\"" + verticesCount + "\" offset=\"0\" source=\"#positions-array-"
                + oid + "\" stride=\"3\">");
        out.println("      <param name=\"X\" type=\"float\"></param>");
        out.println("      <param name=\"Y\" type=\"float\"></param>");
        out.println("      <param name=\"Z\" type=\"float\"></param>");
        out.println("     </accessor>");
        out.println("    </technique_common>");
        out.println("   </source>");
        out.println("   <source id=\"normals-" + oid + "\" name=\"normals-" + oid + "\">");
        out.println("    <float_array id=\"normals-array-" + oid + "\" count=\"" + normalComponentsTotal + "\">"
                + stringNormalScalars + "</float_array>");
        out.println("    <technique_common>");
        out.println("     <accessor count=\"" + normalsCount + "\" offset=\"0\" source=\"#normals-array-" + oid
                + "\" stride=\"3\">");
        out.println("      <param name=\"X\" type=\"float\"></param>");
        out.println("      <param name=\"Y\" type=\"float\"></param>");
        out.println("      <param name=\"Z\" type=\"float\"></param>");
        out.println("     </accessor>");
        out.println("    </technique_common>");
        out.println("   </source>");
        out.println("   <vertices id=\"vertices-" + oid + "\">");
        out.println("    <input semantic=\"POSITION\" source=\"#positions-" + oid + "\"/>");
        out.println("    <input semantic=\"NORMAL\" source=\"#normals-" + oid + "\"/>");
        out.println("   </vertices>");
        out.println("   <triangles count=\"" + triangleCount + "\" material=\"Material-" + oid + "\">");
        out.println("    <input offset=\"0\" semantic=\"VERTEX\" source=\"#vertices-" + oid + "\"/>");
        out.println("    <p>" + stringIndexScalars + "</p>");
        out.println("   </triangles>");
        out.println("  </mesh>");
        out.println(" </geometry>");
    }
}

From source file:org.apache.hadoop.yarn.server.resourcemanager.security.TestDelegationTokenRenewer.java
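
The credentials buffer from the node heartbeat response is rewound before being wrapped in a DataInputByteBuffer, so deserialization of the token storage stream starts at position zero.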

@Test
public void testAppSubmissionWithoutDelegationToken() throws Exception {
    conf.setBoolean(YarnConfiguration.RM_PROXY_USER_PRIVILEGES_ENABLED, true);
    // create token2
    Text userText2 = new Text("user2");
    DelegationTokenIdentifier dtId2 = new DelegationTokenIdentifier(new Text("user2"), new Text("renewer2"),
            userText2);
    final Token<DelegationTokenIdentifier> token2 = new Token<DelegationTokenIdentifier>(dtId2.getBytes(),
            "password2".getBytes(), dtId2.getKind(), new Text("service2"));
    final MockRM rm = new TestSecurityMockRM(conf, null) {
        @Override
        protected DelegationTokenRenewer createDelegationTokenRenewer() {
            return new DelegationTokenRenewer() {
                @Override
                protected Token<?>[] obtainSystemTokensForUser(String user, final Credentials credentials)
                        throws IOException {
                    credentials.addToken(token2.getService(), token2);
                    return new Token<?>[] { token2 };
                }
            };
        }
    };
    rm.start();

    // submit an app without delegationToken
    RMApp app = rm.submitApp(200);

    // wait for the new retrieved hdfs token.
    GenericTestUtils.waitFor(new Supplier<Boolean>() {
        public Boolean get() {
            return rm.getRMContext().getDelegationTokenRenewer().getDelegationTokens().contains(token2);
        }
    }, 1000, 20000);

    // check nm can retrieve the token
    final MockNM nm1 = new MockNM("127.0.0.1:1234", 15120, rm.getResourceTrackerService());
    nm1.registerNode();
    NodeHeartbeatResponse response = nm1.nodeHeartbeat(true);
    ByteBuffer tokenBuffer = response.getSystemCredentialsForApps().get(app.getApplicationId());
    Assert.assertNotNull(tokenBuffer);
    Credentials appCredentials = new Credentials();
    DataInputByteBuffer buf = new DataInputByteBuffer();
    tokenBuffer.rewind();
    buf.reset(tokenBuffer);
    appCredentials.readTokenStorageStream(buf);
    Assert.assertTrue(appCredentials.getAllTokens().contains(token2));
}

From source file:com.alvermont.terraj.fracplanet.geom.VertexBufferArray.java
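
When the backing store grows, each old slice is rewound before being bulk-copied into the corresponding slice of the new buffer, so put(oldBuffer) transfers everything from position zero up to the old limit.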

/**
 * Resize the buffer. This is done by reallocating a new one and copying
 * data from the old buffer to the new one. This is necessary as buffers
 * cannot be dynamically resized.
 */
protected void resizeBuffer() {
    // we can't resize it so we have to allocate a new one and copy the data
    final int slots = (buffer.capacity() / ELEMENTSIZE);
    final int newCapacity = buffer.capacity()
            + (((slots * CAPACITY_PCT_INCREASE) / HUNDRED_PERCENT) * ELEMENTSIZE);

    final ByteBuffer newBuffer = ByteBuffer.allocateDirect(newCapacity).order(ByteOrder.nativeOrder());

    if (log.isDebugEnabled()) {
        log.debug("Resizing vertex buffer capacity to: " + newBuffer.capacity());
    }

    final FloatBuffer oldVertexBuffer = positionBuffer;
    final FloatBuffer oldNormalBuffer = normalBuffer;
    final ByteBuffer oldColourBuffer = colourBuffer;
    final ByteBuffer oldEmissiveBuffer = emissiveBuffer;

    this.buffer = newBuffer;

    sliceAndDice(newCapacity / ELEMENTSIZE);

    oldVertexBuffer.rewind();
    positionBuffer.rewind();
    positionBuffer.limit(oldVertexBuffer.limit());
    positionBuffer.put(oldVertexBuffer);

    oldNormalBuffer.rewind();
    normalBuffer.rewind();
    normalBuffer.limit(oldNormalBuffer.limit());
    normalBuffer.put(oldNormalBuffer);

    oldColourBuffer.rewind();
    colourBuffer.rewind();
    colourBuffer.limit(oldColourBuffer.limit());
    colourBuffer.put(oldColourBuffer);

    oldEmissiveBuffer.rewind();
    emissiveBuffer.rewind();
    emissiveBuffer.limit(oldEmissiveBuffer.limit());
    emissiveBuffer.put(oldEmissiveBuffer);
}