Example usage for java.nio.channels WritableByteChannel write

List of usage examples for java.nio.channels WritableByteChannel write

Introduction

On this page you can find the example usage for java.nio.channels WritableByteChannel write.

Prototype

public int write(ByteBuffer src) throws IOException;

Source Link

Document

Writes a sequence of bytes to this channel from the given buffer.

Usage

From source file:org.alfresco.contentstore.AbstractContentStore.java

/**
 * Applies a patch document to the content read from {@code inChannel}, writing the
 * patched result to {@code outChannel}.
 *
 * <p>For each patch, matched blocks up to and including the patch's last matching
 * block index are copied through unchanged, then the patch bytes are written.
 * Any blocks remaining after the final patch are copied through at the end.
 *
 * @param inChannel     source content to patch
 * @param outChannel    destination for the patched content
 * @param patchDocument the patches plus matched-block metadata describing where they apply
 * @return total number of bytes written to {@code outChannel}
 * @throws IOException if reading or writing fails
 */
protected int applyPatch(ReadableByteChannel inChannel, WritableByteChannel outChannel,
        PatchDocument patchDocument) throws IOException {
    InChannel c = new InChannel(inChannel, patchDocument.getMatchedBlocks(), patchDocument.getBlockSize());

    int totalWritten = 0;
    int blockIndex = -1;

    for (Patch patch : patchDocument.getPatches()) {
        int lastMatchingBlockIndex = patch.getLastMatchIndex();

        // Copy through unmodified blocks up to and including the patch's last match index.
        blockIndex = c.nextBlock();
        while (blockIndex != -1 && blockIndex <= lastMatchingBlockIndex) {
            int bytesWritten = outChannel.write(c.currentBlock);
            totalWritten += bytesWritten;
            if (bytesWritten != c.bytesRead) {
                // Report the expected count (c.bytesRead), not the nominal block size.
                throw new RuntimeException(
                        "Wrote too few bytes, expected " + c.bytesRead + ", got " + bytesWritten);
            }
            blockIndex = c.nextBlock();
        }

        // Apply the patch itself. A single read() call is not guaranteed to fill
        // the buffer, so keep reading until it is full or the stream is exhausted.
        int patchSize = patch.getSize();
        ReadableByteChannel patchChannel = Channels.newChannel(patch.getStream());
        ByteBuffer patchBB = ByteBuffer.allocate(patchSize);
        int bytesRead = 0;
        while (patchBB.hasRemaining()) {
            int n = patchChannel.read(patchBB);
            if (n == -1) {
                break;
            }
            bytesRead += n;
        }
        patchBB.flip();
        int bytesWritten = outChannel.write(patchBB);
        totalWritten += bytesWritten;
        if (bytesWritten != bytesRead) {
            throw new RuntimeException("Wrote too few bytes, expected " + bytesRead + ", got " + bytesWritten);
        }
    }

    // We're done with all the patches; copy through the remaining blocks.
    while (blockIndex != -1) {
        int bytesWritten = outChannel.write(c.currentBlock);
        totalWritten += bytesWritten;
        if (bytesWritten != c.bytesRead) {
            throw new RuntimeException(
                    "Wrote too few bytes, expected " + c.bytesRead + ", got " + bytesWritten);
        }
        blockIndex = c.nextBlock();
    }

    return totalWritten;
}

From source file:org.apache.hadoop.mapred.FadvisedFileRegion.java

/**
 * Transfers up to {@code this.count - position} bytes to {@code target} by staging
 * them through a heap buffer, instead of using {@code FileChannel.transferTo}.
 * This is used only if transferTo is disallowed in the configuration file;
 * super.transferTo does not perform well on Windows due to the small IO requests
 * it generates, whereas this method controls the IO request size by changing the
 * size of the intermediate buffer.
 *
 * @param target   channel the region's bytes are written to
 * @param position offset within this region to start from (relative, added to
 *                 {@code this.position} when reading the file)
 * @return the number of bytes actually transferred (may be less than requested
 *         if the file ends early)
 * @throws IOException if reading the file or writing to the target fails
 * @throws IllegalArgumentException if {@code position} is negative or beyond the region
 */
@VisibleForTesting
long customShuffleTransfer(WritableByteChannel target, long position) throws IOException {
    // Bytes remaining in this region from the requested offset.
    // NOTE(review): assumes this.count is the region length and this.position its
    // absolute file offset — confirm against the enclosing FileRegion's fields.
    long actualCount = this.count - position;
    if (actualCount < 0 || position < 0) {
        throw new IllegalArgumentException(
                "position out of range: " + position + " (expected: 0 - " + (this.count - 1) + ')');
    }
    if (actualCount == 0) {
        return 0L;
    }

    long trans = actualCount;
    int readSize;
    ByteBuffer byteBuffer = ByteBuffer.allocate(this.shuffleBufferSize);

    // Positional read: fileChannel.read(buf, offset) does not move the channel's
    // own position, so 'position' tracks our progress explicitly.
    while (trans > 0L && (readSize = fileChannel.read(byteBuffer, this.position + position)) > 0) {
        //adjust counters and buffer limit
        if (readSize < trans) {
            trans -= readSize;
            position += readSize;
            byteBuffer.flip();
        } else {
            //We can read more than we need if the actualCount is not multiple 
            //of the byteBuffer size and file is big enough. In that case we cannot
            //use flip method but we need to set buffer limit manually to trans.
            byteBuffer.limit((int) trans);
            byteBuffer.position(0);
            position += trans;
            trans = 0;
        }

        //write data to the target; write() may be partial, so loop until drained
        while (byteBuffer.hasRemaining()) {
            target.write(byteBuffer);
        }

        byteBuffer.clear();
    }

    // If the file ended before the region did, trans > 0 and only the bytes
    // actually moved are reported.
    return actualCount - trans;
}

From source file:org.apache.tajo.pullserver.FadvisedFileRegion.java

/**
 * Transfers this region's bytes to {@code target} by staging them through an
 * intermediate heap buffer rather than using {@code FileChannel.transferTo}.
 * Used only when transferTo is disallowed in the configuration file;
 * super.transferTo performs poorly on Windows because of the small IO requests
 * it generates, while this method controls the IO request size through the
 * size of the intermediate buffer.
 *
 * @param target   channel to write the region's bytes to
 * @param position offset within the region to start from
 * @return the number of bytes actually transferred
 * @throws IOException if reading the file or writing to the target fails
 */
@VisibleForTesting
long customShuffleTransfer(WritableByteChannel target, long position) throws IOException {
    long actualCount = this.count - position;
    if (actualCount < 0 || position < 0) {
        throw new IllegalArgumentException(
                "position out of range: " + position + " (expected: 0 - " + (this.count - 1) + ')');
    }
    if (actualCount == 0) {
        return 0L;
    }

    long remaining = actualCount;
    ByteBuffer buffer = ByteBuffer.allocate(this.shuffleBufferSize);

    while (remaining > 0L) {
        // Positional read: does not move the channel's own position.
        int bytesRead = fileChannel.read(buffer, this.position + position);
        if (bytesRead <= 0) {
            break;
        }

        if (bytesRead < remaining) {
            remaining -= bytesRead;
            position += bytesRead;
            buffer.flip();
        } else {
            // The last read may overshoot the region end when actualCount is not a
            // multiple of the buffer size; cap the limit manually instead of flipping.
            buffer.limit((int) remaining);
            buffer.position(0);
            position += remaining;
            remaining = 0;
        }

        // Drain the buffer completely; a single write() may be partial.
        while (buffer.hasRemaining()) {
            target.write(buffer);
        }

        buffer.clear();
    }

    return actualCount - remaining;
}

From source file:com.alibaba.jstorm.daemon.nimbus.ServiceHandler.java

/**
 * uploading topology jar data/*from   w ww. j a v  a2  s .  c  om*/
 */
@Override
public void uploadChunk(String location, ByteBuffer chunk) throws TException {
    TimeCacheMap<Object, Object> uploaders = data.getUploaders();
    Object obj = uploaders.get(location);
    if (obj == null) {
        throw new TException("File for that location does not exist (or timed out) " + location);
    }
    try {
        if (obj instanceof WritableByteChannel) {
            WritableByteChannel channel = (WritableByteChannel) obj;
            channel.write(chunk);
            uploaders.put(location, channel);
        } else {
            throw new TException("Object isn't WritableByteChannel for " + location);
        }
    } catch (IOException e) {
        String errMsg = " WritableByteChannel write filed when uploadChunk " + location;
        LOG.error(errMsg);
        throw new TException(e);
    }

}

From source file:ipc.Server.java

/**
 * This is a wrapper around {@link WritableByteChannel#write(ByteBuffer)}.
 * If the amount of data is large, it writes to the channel in smaller chunks.
 * This avoids the JDK creating many direct buffers as the buffer size grows,
 * and minimizes extra copies in the NIO layer caused by the multiple write
 * operations needed for a large buffer.
 *
 * @see WritableByteChannel#write(ByteBuffer)
 */
private int channelWrite(WritableByteChannel channel, ByteBuffer buffer) throws IOException {
    if (buffer.remaining() <= NIO_BUFFER_LIMIT) {
        // Small enough to hand to the channel in one call.
        return channel.write(buffer);
    }
    // Large buffer: delegate to the chunking helper.
    return channelIO(null, channel, buffer);
}

From source file:org.apache.olingo.server.core.serializer.json.ODataJsonSerializerTest.java

/**
 * Verifies that when streamed entity-collection serialization fails (empty
 * entities are missing required properties), the registered error callback
 * runs and its output — not partial content — is what ends up in the stream.
 */
@Test
public void entityCollectionStreamedWithError() throws Exception {
    final EdmEntitySet edmEntitySet = entityContainer.getEntitySet("ESAllPrim");
    // Iterator that yields empty entities so serialization fails with MISSING_PROPERTY.
    final EntityIterator entityIterator = new EntityIterator() {
        EntityCollection entityCollection = data.readAll(edmEntitySet);
        Iterator<Entity> innerIterator = entityCollection.iterator();

        @Override
        public List<Operation> getOperations() {
            return entityCollection.getOperations();
        }

        @Override
        public boolean hasNext() {
            return innerIterator.hasNext();
        }

        @Override
        public Entity next() {
            return new Entity();
        }
    };
    CountOption countOption = Mockito.mock(CountOption.class);
    Mockito.when(countOption.getValue()).thenReturn(true);

    // Error callback writes a marker string into the channel instead of content.
    ODataContentWriteErrorCallback errorCallback = new ODataContentWriteErrorCallback() {
        @Override
        public void handleError(ODataContentWriteErrorContext context, WritableByteChannel channel) {
            try {
                Exception ex = context.getException();
                Assert.assertEquals(ex, context.getODataLibraryException());
                String msgKey = context.getODataLibraryException().getMessageKey().getKey();
                String toChannel = "ERROR: " + msgKey;
                channel.write(ByteBuffer.wrap(toChannel.getBytes("UTF-8")));
            } catch (IOException e) {
                // Preserve the cause so a failure inside the callback is diagnosable.
                throw new RuntimeException("Error in error.", e);
            }
        }
    };

    ODataContent result = serializer
            .entityCollectionStreamed(metadata, edmEntitySet.getEntityType(), entityIterator,
                    EntityCollectionSerializerOptions.with().writeContentErrorCallback(errorCallback)
                            .contextURL(ContextURL.with().entitySet(edmEntitySet).build()).build())
            .getODataContent();
    ByteArrayOutputStream bout = new ByteArrayOutputStream();
    result.write(bout);
    final String resultString = new String(bout.toByteArray(), "UTF-8");
    // JUnit convention: expected value first, actual second.
    Assert.assertEquals("ERROR: MISSING_PROPERTY", resultString);
}

From source file:com.hortonworks.hbase.replication.bridge.HBaseServer.java

/**
 * Wrapper around {@link java.nio.channels.WritableByteChannel#write(java.nio.ByteBuffer)}.
 * Large buffers are written to the channel in smaller chunks, which keeps the
 * JDK from creating many direct buffers as the buffer size grows and minimizes
 * the extra NIO-layer copies caused by the multiple write operations required
 * for a large buffer.
 *
 * @param channel writable byte channel to write to
 * @param buffer buffer to write
 * @return number of bytes written
 * @throws java.io.IOException e
 * @see java.nio.channels.WritableByteChannel#write(java.nio.ByteBuffer)
 */
protected int channelWrite(WritableByteChannel channel, ByteBuffer buffer) throws IOException {
    final int count;
    if (buffer.remaining() <= NIO_BUFFER_LIMIT) {
        count = channel.write(buffer);
    } else {
        count = channelIO(null, channel, buffer);
    }
    if (count > 0) {
        // Account the outbound bytes in the RPC metrics.
        rpcMetrics.sentBytes.inc(count);
    }
    return count;
}

From source file:com.github.hrpc.rpc.Server.java

/**
 * This is a wrapper around {@link WritableByteChannel#write(ByteBuffer)}.
 * Large buffers are written to the channel in smaller chunks, which keeps the
 * JDK from creating many direct buffers as the buffer size grows and minimizes
 * the extra NIO-layer copies caused by the multiple write operations required
 * for a large buffer.
 *
 * @see WritableByteChannel#write(ByteBuffer)
 */
private int channelWrite(WritableByteChannel channel, ByteBuffer buffer) throws IOException {
    final int count;
    if (buffer.remaining() <= NIO_BUFFER_LIMIT) {
        count = channel.write(buffer);
    } else {
        count = channelIO(null, channel, buffer);
    }
    if (count > 0) {
        // Account the outbound bytes in the RPC metrics.
        rpcMetrics.incrSentBytes(count);
    }
    return count;
}

From source file:org.apache.olingo.server.core.serializer.json.ODataJsonSerializerv01Test.java

/**
 * Verifies that when streamed entity-collection serialization fails (entities
 * carry only an id and are missing required properties), the registered error
 * callback runs and its output is what ends up in the stream.
 */
@Test
public void entityCollectionStreamedWithError() throws Exception {
    final EdmEntitySet edmEntitySet = entityContainer.getEntitySet("ESAllPrim");
    // Iterator that yields id-only entities so serialization fails with MISSING_PROPERTY.
    final EntityIterator entityIterator = new EntityIterator() {
        EntityCollection entityCollection = data.readAll(edmEntitySet);
        Iterator<Entity> innerIterator = entityCollection.iterator();

        @Override
        public List<Operation> getOperations() {
            return entityCollection.getOperations();
        }

        @Override
        public boolean hasNext() {
            return innerIterator.hasNext();
        }

        @Override
        public Entity next() {
            Entity e = new Entity();
            e.setId(URI.create("id"));
            return e;
        }
    };
    CountOption countOption = Mockito.mock(CountOption.class);
    Mockito.when(countOption.getValue()).thenReturn(true);

    // Error callback writes a marker string into the channel instead of content.
    ODataContentWriteErrorCallback errorCallback = new ODataContentWriteErrorCallback() {
        @Override
        public void handleError(ODataContentWriteErrorContext context, WritableByteChannel channel) {
            try {
                Exception ex = context.getException();
                Assert.assertEquals(ex, context.getODataLibraryException());
                String msgKey = context.getODataLibraryException().getMessageKey().getKey();
                String toChannel = "ERROR: " + msgKey;
                channel.write(ByteBuffer.wrap(toChannel.getBytes("UTF-8")));
            } catch (IOException e) {
                // Preserve the cause so a failure inside the callback is diagnosable.
                throw new RuntimeException("Error in error.", e);
            }
        }
    };

    ODataContent result = serializer
            .entityCollectionStreamed(metadata, edmEntitySet.getEntityType(), entityIterator,
                    EntityCollectionSerializerOptions.with().writeContentErrorCallback(errorCallback)
                            .contextURL(ContextURL.with().entitySet(edmEntitySet).build()).build())
            .getODataContent();
    ByteArrayOutputStream bout = new ByteArrayOutputStream();
    result.write(bout);
    final String resultString = new String(bout.toByteArray(), "UTF-8");
    Assert.assertEquals("ERROR: MISSING_PROPERTY", resultString);
}

From source file:edu.harvard.iq.dvn.core.web.servlet.FileDownloadServlet.java

/**
 * Streams the contents of {@code in} to {@code out}, optionally prepending a
 * variable header line (used when serving TAB-delimited data files). Both
 * channels are closed when streaming finishes or fails. I/O errors are
 * deliberately swallowed: this is best-effort download streaming.
 *
 * @param in        source file channel
 * @param out       destination channel (typically wrapping the HTTP response)
 * @param varHeader optional header line written before the file body; skipped when null
 */
public void streamData(FileChannel in, WritableByteChannel out, String varHeader) {

    long position = 0;
    long howMany = 32 * 1024;

    try {
        // If we are streaming a TAB-delimited file, we will need to add the
        // variable header line:
        if (varHeader != null) {
            ByteBuffer varHeaderByteBuffer = ByteBuffer.wrap(varHeader.getBytes());
            // A single write() may be partial; loop until the header is fully written.
            while (varHeaderByteBuffer.hasRemaining()) {
                out.write(varHeaderByteBuffer);
            }
        }

        while (position < in.size()) {
            // transferTo may move fewer bytes than requested; advance by the
            // actual count instead of assuming the full chunk was sent.
            long transferred = in.transferTo(position, howMany, out);
            if (transferred <= 0) {
                break;
            }
            position += transferred;
        }
    } catch (IOException ex) {
        // whatever. we don't care at this point.
    } finally {
        // Close both channels even when streaming fails part-way.
        try {
            in.close();
        } catch (IOException ignored) {
            // best-effort close
        }
        try {
            out.close();
        } catch (IOException ignored) {
            // best-effort close
        }
    }

}