Example usage for java.nio.ByteBuffer.clear()

Introduction

On this page you can find example usage of java.nio.ByteBuffer.clear(), collected from open-source projects.

Prototype

public final Buffer clear() 

Document

Clears this buffer: the position is set to zero, the limit is set to the capacity, and the mark is discarded. The data in the buffer is not erased.
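
As a primer before the project examples under Usage, here is a minimal, self-contained sketch of the fill/drain cycle in which clear() typically appears. The file name "input.bin" is a placeholder; substitute any readable file.

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.Paths;
import java.nio.file.StandardOpenOption;

public class ByteBufferClearExample {
    public static void main(String[] args) throws IOException {
        // "input.bin" is a placeholder; point this at any readable file.
        try (FileChannel channel = FileChannel.open(Paths.get("input.bin"), StandardOpenOption.READ)) {
            ByteBuffer buffer = ByteBuffer.allocate(4096);
            while (channel.read(buffer) > 0) {
                buffer.flip(); // switch to draining: limit = old position, position = 0
                while (buffer.hasRemaining()) {
                    System.out.print((char) buffer.get());
                }
                buffer.clear(); // switch back to filling: position = 0, limit = capacity
            }
        }
    }
}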

Usage

From source file: org.apache.nifi.io.nio.AbstractChannelReader.java

@Override
public final void run() {
    if (!key.isValid() || consumer.isConsumerFinished()) {
        closeStream();
        return;
    }
    if (!key.isReadable()) {
        return; // there is nothing available to read... or we aren't allowed to read due to throttling
    }
    ByteBuffer buffer = null;
    try {
        buffer = bufferPool.poll();
        if (buffer == null) {
            return; // no buffers available - come back later
        }
        final int bytesRead = fillBuffer(key, buffer);
        buffer.flip();
        if (buffer.remaining() > 0) {
            consumer.addFilledBuffer(buffer);
            buffer = null; //clear the reference - is now the consumer's responsibility
        } else {
            buffer.clear();
            bufferPool.returnBuffer(buffer, 0);
            buffer = null; //clear the reference - is now back to the queue
        }
        if (bytesRead < 0) { //we've reached the end
            closeStream();
        }
    } catch (final Exception ioe) {
        closeStream();
        LOGGER.error("Closed channel reader " + this + " due to " + ioe);
    } finally {
        if (buffer != null) {
            buffer.clear();
            bufferPool.returnBuffer(buffer, 0);
        }
    }
}
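
In this example clear() readies a pooled buffer for reuse: the buffer is reset before being returned to the pool, so the next borrower receives it with position zero and limit at capacity.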

From source file: edu.umn.cs.spatialHadoop.nasa.HDFRasterLayer.java

@Override
public void write(DataOutput out) throws IOException {
    super.write(out);
    out.writeLong(timestamp);
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    GZIPOutputStream gzos = new GZIPOutputStream(baos);
    ByteBuffer bbuffer = ByteBuffer.allocate(getHeight() * 2 * 8 + 8);
    bbuffer.putInt(getWidth());
    bbuffer.putInt(getHeight());
    gzos.write(bbuffer.array(), 0, bbuffer.position());
    for (int x = 0; x < getWidth(); x++) {
        bbuffer.clear();
        for (int y = 0; y < getHeight(); y++) {
            bbuffer.putLong(sum[x][y]);
            bbuffer.putLong(count[x][y]);
        }
        gzos.write(bbuffer.array(), 0, bbuffer.position());
    }
    gzos.close();

    byte[] serializedData = baos.toByteArray();
    out.writeInt(serializedData.length);
    out.write(serializedData);
}
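
Here clear() resets the buffer at the start of each column so the putLong() calls write from position zero; bbuffer.position() then gives the number of valid bytes to pass to the GZIP stream.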

From source file: org.apache.parquet.hadoop.DirectCodecFactory.java

private ByteBuffer ensure(ByteBuffer buffer, int size) {
    if (buffer == null) {
        buffer = allocator.allocate(size);
    } else if (buffer.capacity() >= size) {
        buffer.clear();
    } else {
        release(buffer);
        buffer = allocator.allocate(size);
    }
    return buffer;
}
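
Here clear() allows an already-allocated buffer to be reused whenever its capacity is sufficient, avoiding a fresh allocation.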

From source file: com.l2jfree.network.mmocore.ReadWriteThread.java

final void recycleBuffer(ByteBuffer buf) {
    if (getFreeBuffers().size() < getHelperBufferCount()) {
        buf.clear();
        getFreeBuffers().addLast(buf);
    }
}

From source file: org.apache.vxquery.cli.VXQuery.java

/**
 * Creates a Hyracks dataset with the job's frame size and a single reader, if one does not
 * already exist. Allocates a buffer of the frame size, creates a dataset reader for the
 * current job ID and result set ID, and prints the string contents of each frame to the writer.
 *
 * @param spec
 *            JobSpecification for the current job, containing the frame size.
 * @param writer
 *            Writer for the job's output.
 * @throws Exception
 */
private void runJob(JobSpecification spec, PrintWriter writer) throws Exception {
    int nReaders = 1;
    if (hds == null) {
        hds = new HyracksDataset(hcc, spec.getFrameSize(), nReaders);
    }

    JobId jobId = hcc.startJob(spec, EnumSet.of(JobFlag.PROFILE_RUNTIME));

    ByteBuffer buffer = ByteBuffer.allocate(spec.getFrameSize());
    IHyracksDatasetReader reader = hds.createReader(jobId, resultSetId);
    IFrameTupleAccessor frameTupleAccessor = new ResultFrameTupleAccessor(spec.getFrameSize());
    buffer.clear();

    while (reader.read(buffer) > 0) {
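        // clear() only resets position and limit; the frame bytes just read remain available to the accessor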
        buffer.clear();
        writer.print(ResultUtils.getStringFromBuffer(buffer, frameTupleAccessor));
        writer.flush();
    }

    hcc.waitForCompletion(jobId);
}

From source file: org.apache.hadoop.hbase.client.TestResult.java

/**
 * Microbenchmark that compares {@link Result#getValue} and {@link Result#loadValue} performance.
 *
 * @throws Exception
 */
public void doReadBenchmark() throws Exception {

    final int n = 5;
    final int m = 100000000;

    StringBuilder valueSB = new StringBuilder();
    for (int i = 0; i < 100; i++) {
        valueSB.append((byte) (Math.random() * 10));
    }

    StringBuilder rowSB = new StringBuilder();
    for (int i = 0; i < 50; i++) {
        rowSB.append((byte) (Math.random() * 10));
    }

    KeyValue[] kvs = genKVs(Bytes.toBytes(rowSB.toString()), family, Bytes.toBytes(valueSB.toString()), 1, n);
    Arrays.sort(kvs, KeyValue.COMPARATOR);
    ByteBuffer loadValueBuffer = ByteBuffer.allocate(1024);
    Result r = Result.create(kvs);

    byte[][] qfs = new byte[n][Bytes.SIZEOF_INT];
    for (int i = 0; i < n; ++i) {
        System.arraycopy(Bytes.toBytes(i), 0, qfs[i], 0, Bytes.SIZEOF_INT); // encode i into qfs[i]
    }

    // warm up
    for (int k = 0; k < 100000; k++) {
        for (int i = 0; i < n; ++i) {
            r.getValue(family, qfs[i]);
            loadValueBuffer.clear();
            r.loadValue(family, qfs[i], loadValueBuffer);
            loadValueBuffer.flip();
        }
    }

    System.gc();
    long start = System.nanoTime();
    for (int k = 0; k < m; k++) {
        for (int i = 0; i < n; ++i) {
            loadValueBuffer.clear();
            r.loadValue(family, qfs[i], loadValueBuffer);
            loadValueBuffer.flip();
        }
    }
    long stop = System.nanoTime();
    System.out.println("loadValue(): " + (stop - start));

    System.gc();
    start = System.nanoTime();
    for (int k = 0; k < m; k++) {
        for (int i = 0; i < n; i++) {
            r.getValue(family, qfs[i]);
        }
    }
    stop = System.nanoTime();
    System.out.println("getValue():  " + (stop - start));
}
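
The benchmark pairs clear() with flip() around each loadValue() call: clear() makes the full buffer writable again, and flip() makes the newly loaded value readable.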

From source file: ga.rugal.jpt.common.tracker.common.Torrent.java

private static String hashFiles(List<File> files, int pieceLenght) throws InterruptedException, IOException {
    int threads = getHashingThreadsCount();
    ExecutorService executor = Executors.newFixedThreadPool(threads);
    ByteBuffer buffer = ByteBuffer.allocate(pieceLenght);
    List<Future<String>> results = new LinkedList<>();
    StringBuilder hashes = new StringBuilder();

    long length = 0L;
    int pieces = 0;

    long start = System.nanoTime();
    for (File file : files) {
        LOG.info("Hashing data from {} with {} threads ({} pieces)...", new Object[] { file.getName(), threads,
                (int) (Math.ceil((double) file.length() / pieceLenght)) });

        length += file.length();

        FileInputStream fis = new FileInputStream(file);
        FileChannel channel = fis.getChannel();
        int step = 10;

        try {
            while (channel.read(buffer) > 0) {
                if (buffer.remaining() == 0) {
                    buffer.clear();
                    results.add(executor.submit(new CallableChunkHasher(buffer)));
                }

                if (results.size() >= threads) {
                    pieces += accumulateHashes(hashes, results);
                }

                if (channel.position() / (double) channel.size() * 100f > step) {
                    LOG.info("  ... {}% complete", step);
                    step += 10;
                }
            }
        } finally {
            channel.close();
            fis.close();
        }
    }

    // Hash the last bit, if any
    if (buffer.position() > 0) {
        buffer.limit(buffer.position());
        buffer.position(0);
        results.add(executor.submit(new CallableChunkHasher(buffer)));
    }

    pieces += accumulateHashes(hashes, results);

    // Request orderly executor shutdown and wait for hashing tasks to
    // complete.
    executor.shutdown();
    while (!executor.isTerminated()) {
        Thread.sleep(10);
    }
    long elapsed = System.nanoTime() - start;

    int expectedPieces = (int) (Math.ceil((double) length / pieceLenght));
    LOG.info("Hashed {} file(s) ({} bytes) in {} pieces ({} expected) in {}ms.", new Object[] { files.size(),
            length, pieces, expectedPieces, String.format("%.1f", elapsed / 1e6), });

    return hashes.toString();
}
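
Here clear() resets the piece buffer once it has filled so the channel can refill it; CallableChunkHasher is presumably expected to copy or duplicate the buffer's contents before the next read overwrites them.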

From source file: org.apache.axiom.attachments.impl.BufferUtils.java

/**
 * Optimized writing to a FileOutputStream using a channel.
 * @param is
 * @param fos
 * @return false if the lock was not acquired
 * @throws IOException
 */
public static boolean inputStream2FileOutputStream(InputStream is, FileOutputStream fos) throws IOException {

    // See if a file channel and lock can be obtained on the FileOutputStream
    FileChannel channel = null;
    FileLock lock = null;
    ByteBuffer bb = null;
    try {
        channel = fos.getChannel();
        if (channel != null) {
            lock = channel.tryLock();
        }
        bb = getTempByteBuffer();
    } catch (Throwable t) {
    }
    if (lock == null || bb == null || !bb.hasArray()) {
        releaseTempByteBuffer(bb);
        return false; // lock could not be set or bb does not have direct array access
    }

    try {

        // Read directly into the ByteBuffer array
        int bytesRead = is.read(bb.array());
        // Continue reading until no bytes are read and no
        // bytes are now available.
        while (bytesRead > 0 || is.available() > 0) {
            if (bytesRead > 0) {
                int written = 0;

                if (bytesRead < BUFFER_LEN) {
                    // If the ByteBuffer is not full, allocate a new one
                    ByteBuffer temp = ByteBuffer.allocate(bytesRead);
                    temp.put(bb.array(), 0, bytesRead);
                    temp.position(0);
                    written = channel.write(temp);
                } else {
                    // Write to channel
                    bb.position(0);
                    written = channel.write(bb);
                    bb.clear();
                }

            }

            // REVIEW: Do we need to ensure that bytesWritten is 
            // the same as the number of bytes sent ?

            bytesRead = is.read(bb.array());
        }
    } finally {
        // Release the lock
        lock.release();
        releaseTempByteBuffer(bb);
    }
    return true;
}
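
Here clear() restores the buffer's position and limit after channel.write() has advanced the position, keeping the buffer consistent for the next pass over its backing array.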

From source file: com.linkedin.databus.bootstrap.utils.BootstrapTableReader.java

public void execute() throws Exception {
    String query = getQuery();
    Connection conn = null;
    Statement stmt = null;
    ResultSet rs = null;
    try {
        conn = getConnection();
        stmt = conn.createStatement();

        LOG.info("Executing query : " + query);
        rs = stmt.executeQuery(query);

        byte[] b1 = new byte[1024 * 1024];
        ByteBuffer buffer = ByteBuffer.wrap(b1);
        DbusEventInternalReadable event = _eventFactory.createReadOnlyDbusEventFromBuffer(buffer, 0);

        int count = 0;
        _eventHandler.onStart(query);

        while (rs.next()) {
            buffer.clear();
            buffer.put(rs.getBytes("val"));
            event = event.reset(buffer, 0);
            GenericRecord record = _decoder.getGenericRecord(event);
            _eventHandler.onRecord(event, record);
            count++;
        }
        _eventHandler.onEnd(count);
    } finally {
        DBHelper.close(rs, stmt, conn);
    }
}

From source file: com.esri.geoevent.test.performance.ClockSync.java

@Override
public void run() {
    DatagramSocket socket = null;
    try {
        byte[] incomingBuffer = new byte[1024];
        DatagramPacket packet = new DatagramPacket(incomingBuffer, incomingBuffer.length);

        ByteBuffer bb = ByteBuffer.allocate(8);
        DatagramPacket outgoingPacket = new DatagramPacket(bb.array(), 0, 8, null, port);
        socket = new DatagramSocket(port);
        socket.setSoTimeout(100);
        while (isRunning.get()) {
            try {
                socket.receive(packet);
                long now = System.currentTimeMillis();
                bb.putLong(now);
                outgoingPacket.setAddress(packet.getAddress());
                outgoingPacket.setPort(packet.getPort());
                socket.send(outgoingPacket);
                bb.clear();
                //System.out.println("Sent the time " + now);
            } catch (SocketTimeoutException ex) {
                // Do nothing if nothing was sent.
            }
        }
    } catch (BindException e) {
        // port is in use - increment and try again
        port++;
        this.run();
    } catch (SocketException e) {
        e.printStackTrace();
    } catch (IOException e) {
        e.printStackTrace();
    } finally {
        IOUtils.closeQuietly(socket);
    }
}
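
Here clear() rewinds the eight-byte buffer after each reply; without it, the next putLong() would overflow the buffer.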