Example usage for java.nio ByteBuffer allocate

List of usage examples for java.nio ByteBuffer allocate

Introduction

On this page you can find usage examples for java.nio.ByteBuffer.allocate.

Prototype

public static ByteBuffer allocate(int capacity) 

Document

Allocates a new byte buffer of the given capacity, backed by a newly allocated byte array.
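
Before the project examples below, here is a minimal, self-contained sketch of the call itself (the class name AllocateDemo and the values written are illustrative only, not taken from the examples):

import java.nio.ByteBuffer;

public class AllocateDemo {
    public static void main(String[] args) {
        // capacity 16: position = 0, limit = capacity, backed by a new byte[16]
        ByteBuffer buf = ByteBuffer.allocate(16);
        buf.putInt(42);      // writes 4 bytes, advances position to 4
        buf.putLong(7L);     // writes 8 bytes, advances position to 12
        buf.flip();          // limit = 12, position = 0: ready for reading
        System.out.println(buf.getInt());   // 42
        System.out.println(buf.getLong());  // 7
        System.out.println(buf.hasArray()); // true: heap buffers expose their backing array
    }
}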

Usage

From source file:com.owncloud.android.oc_framework.network.webdav.FileRequestEntity.java

@Override
public void writeRequest(final OutputStream out) throws IOException {
    //byte[] tmp = new byte[4096];
    ByteBuffer tmp = ByteBuffer.allocate(4096);
    int readResult = 0;

    // TODO(bprzybylski): each mem allocation can throw OutOfMemoryError we need to handle it
    //                    globally in some fashionable manner
    RandomAccessFile raf = new RandomAccessFile(mFile, "r");
    FileChannel channel = raf.getChannel();
    Iterator<OnDatatransferProgressListener> it = null;
    long transferred = 0;
    long size = mFile.length();
    if (size == 0)
        size = -1;
    try {
        while ((readResult = channel.read(tmp)) >= 0) {
            out.write(tmp.array(), 0, readResult);
            tmp.clear();
            transferred += readResult;
            synchronized (mDataTransferListeners) {
                it = mDataTransferListeners.iterator();
                while (it.hasNext()) {
                    it.next().onTransferProgress(readResult, transferred, size, mFile.getName());
                }
            }
        }

    } catch (IOException io) {
        Log.e("FileRequestException", io.getMessage());
        throw new RuntimeException(
                "Ugly solution to workaround the default policy of retries when the server falls while uploading ; temporal fix; really",
                io);

    } finally {
        channel.close();
        raf.close();
    }
}

From source file:io.druid.query.search.FragmentSearchQuerySpec.java

@Override
public byte[] getCacheKey() {
    if (values == null) {
        return ByteBuffer.allocate(2).put(CACHE_TYPE_ID).put(caseSensitive ? (byte) 1 : 0).array();
    }

    final byte[][] valuesBytes = new byte[values.size()][];
    int valuesBytesSize = 0;
    int index = 0;
    for (String value : values) {
        valuesBytes[index] = StringUtils.toUtf8(value);
        valuesBytesSize += valuesBytes[index].length;
        ++index;
    }

    final ByteBuffer queryCacheKey = ByteBuffer.allocate(2 + valuesBytesSize).put(CACHE_TYPE_ID)
            .put(caseSensitive ? (byte) 1 : 0);

    for (byte[] bytes : valuesBytes) {
        queryCacheKey.put(bytes);
    }

    return queryCacheKey.array();
}

From source file:oz.hadoop.yarn.api.net.AbstractSocketHandler.java

/**
 * @param address the socket address this handler will bind to (server) or connect to (client)
 * @param server true to open a ServerSocketChannel, false to open a SocketChannel
 * @param onDisconnectTask task to run when the connection is torn down
 */
public AbstractSocketHandler(InetSocketAddress address, boolean server, Runnable onDisconnectTask) {
    Assert.notNull(address);
    this.onDisconnectTask = onDisconnectTask;
    this.address = address;
    this.executor = Executors.newCachedThreadPool();
    this.listenerTask = new ListenerTask();
    this.readingBuffer = ByteBuffer.allocate(16384);
    this.bufferPoll = new ByteBufferPool();
    try {
        this.rootChannel = server ? ServerSocketChannel.open() : SocketChannel.open();
        if (logger.isDebugEnabled()) {
            logger.debug("Created instance of " + this.getClass().getSimpleName());
        }
    } catch (Exception e) {
        throw new IllegalStateException("Failed to create an instance of the ApplicationContainerClient", e);
    }
    this.thisClass = this.getClass();
    this.lifeCycleLatch = new CountDownLatch(1);
}

From source file:de.rwhq.io.rm.FileResourceManager.java

@Override
public RawPage getPage(final int pageId) {

    ensureOpen();
    ensurePageExists(pageId);

    final RawPage result;

    final ByteBuffer buf = ByteBuffer.allocate(header.getPageSize());

    try {
        ioChannel.read(buf, header.getPageOffset(pageId));
    } catch (IOException e) {
        e.printStackTrace();
        System.exit(1);
    }

    result = new RawPage(buf, pageId, this);
    return result;
}

From source file:gridool.memcached.gateway.MemcachedProxyHandler.java

@Override
public byte[] handleGet(byte[] key) {
    final ByteBuffer reqPacket = ByteBuffer.allocate(HEADER_LENGTH + key.length);
    // request header
    Header header = new Header(MAGIC_BYTE_REQUEST, OPCODE_GET);
    header.setBodyLength(GET_EXTRA_LENGTH, key.length, 0);
    header.encode(reqPacket);
    // request body (key)
    reqPacket.put(key);
    reqPacket.flip();

    final byte[] value;
    final SocketAddress sockAddr = getSocket(key);
    final ByteChannel channel = sockPool.borrowObject(sockAddr);
    try {
        // handle request
        NIOUtils.writeFully(channel, reqPacket);

        // handle response header
        ByteBuffer responseHeaderPacket = ByteBuffer.allocate(HEADER_LENGTH);
        NIOUtils.readFully(channel, responseHeaderPacket);
        responseHeaderPacket.flip();
        // handle response body (field offsets follow the memcached binary protocol header)
        int totalBody = responseHeaderPacket.getInt(8); // total body length (extras + key + value)
        int keyLen = responseHeaderPacket.getShort(2); // key length
        int extraLen = responseHeaderPacket.get(4); // extras length
        int bodyPos = extraLen + keyLen;
        int bodyLen = totalBody - bodyPos;
        if (bodyLen <= 0) {
            return null;
        }
        ByteBuffer responseBodyPacket = ByteBuffer.allocate(totalBody);
        NIOUtils.readFully(channel, responseBodyPacket);
        responseBodyPacket.flip();
        value = new byte[bodyLen];
        responseBodyPacket.get(value, 0, bodyLen);
    } catch (IOException e) {
        LOG.error(e);
        return null;
    } finally {
        sockPool.returnObject(sockAddr, channel);
    }
    return value;
}

From source file:eu.alefzero.webdav.FileRequestEntity.java

@Override
public void writeRequest(final OutputStream out) throws IOException {
    //byte[] tmp = new byte[4096];
    ByteBuffer tmp = ByteBuffer.allocate(4096);
    int readResult = 0;

    // TODO(bprzybylski): each mem allocation can throw OutOfMemoryError we need to handle it
    //                    globally in some fashionable manner
    RandomAccessFile raf = new RandomAccessFile(mFile, "r");
    FileChannel channel = raf.getChannel();
    Iterator<OnDatatransferProgressListener> it = null;
    long transferred = 0;
    long size = mFile.length();
    if (size == 0)
        size = -1;
    try {
        while ((readResult = channel.read(tmp)) >= 0) {
            out.write(tmp.array(), 0, readResult);
            tmp.clear();
            transferred += readResult;
            synchronized (mDataTransferListeners) {
                it = mDataTransferListeners.iterator();
                while (it.hasNext()) {
                    it.next().onTransferProgress(readResult, transferred, size, mFile.getName());
                }
            }
        }

    } catch (IOException io) {
        Log_OC.e("FileRequestException", io.getMessage());
        throw new RuntimeException(
                "Ugly solution to workaround the default policy of retries when the server falls while uploading ; temporal fix; really",
                io);

    } finally {
        channel.close();
        raf.close();
    }
}

From source file:com.bah.culvert.util.Bytes.java

/**
 * Convert a short to a byte[]
 * @param i the short to convert
 * @return a 2-byte array holding the value in big-endian order
 */
public static byte[] toBytes(short i) {
    int bytes = Short.SIZE / 8;

    return ByteBuffer.allocate(bytes).putShort(i).array();
}

From source file:org.apache.cxf.transport.http.asyncclient.CXFHttpAsyncRequestProducer.java

public void produceContent(final ContentEncoder enc, final IOControl ioc) throws IOException {
    if (content != null) {
        if (buffer == null) {
            if (content.getTempFile() == null) {
                buffer = ByteBuffer.wrap(content.getBytes());
            } else {
                fis = content.getInputStream();
                chan = (fis instanceof FileInputStream) ? ((FileInputStream) fis).getChannel()
                        : Channels.newChannel(fis);
                buffer = ByteBuffer.allocate(8 * 1024);
            }
        }
        int i = -1;
        buffer.rewind();
        if (buffer.hasRemaining() && chan != null) {
            i = chan.read(buffer);
            buffer.flip();
        }
        enc.write(buffer);
        if (!buffer.hasRemaining() && i == -1) {
            enc.complete();
        }
    } else {
        buf.produceContent(enc, ioc);
    }
}

From source file:io.druid.segment.writeout.WriteOutBytesTest.java

private void verifyContents(WriteOutBytes writeOutBytes, String expected) throws IOException {
    Assert.assertEquals(expected, IOUtils.toString(writeOutBytes.asInputStream(), StandardCharsets.US_ASCII));
    ByteBuffer bb = ByteBuffer.allocate((int) writeOutBytes.size());
    writeOutBytes.readFully(0, bb);
    bb.flip();
    Assert.assertEquals(expected, StringUtils.fromUtf8(bb));
}

From source file:com.btoddb.fastpersitentqueue.JournalMgrIT.java

@Test
public void testInitWithExistingFiles() throws Exception {
    //
    // create some files then shutdown
    //

    mgr.setMaxJournalFileSize(60);
    mgr.init();

    for (int i = 0; i < 100; i++) {
        mgr.append(new FpqEntry(idGen.incrementAndGet(), ByteBuffer.allocate(10).putInt(i).array()));
    }

    mgr.shutdown();

    //
    // startup again and make sure we have a writable file
    //

    mgr = new JournalMgr();
    mgr.setDirectory(theDir);
    mgr.setNumberOfFlushWorkers(4);
    mgr.setFlushPeriodInMs(1000);
    mgr.init();

    assertThat(mgr.getJournalsLoadedAtStartup(), is(50L));
    assertThat(mgr.getNumberOfEntries(), is(100L));
    assertThat(mgr.getJournalFiles().keySet(), hasSize(51));
    assertThat(mgr.getCurrentJournalDescriptor().isWritingFinished(), is(false));
}