Example usage for io.vertx.core.buffer Buffer buffer

List of usage examples for io.vertx.core.buffer Buffer buffer

Introduction

On this page you can find example usages of io.vertx.core.buffer Buffer.buffer.

Prototype

buffer

Source Link

Usage

From source file:com.cyngn.vertx.opentsdb.service.MetricsProcessor.java

License:Apache License

/**
 * Given a queue of metrics to send, process the metrics into the right format and send them over a socket
 *
 * @param metrics the metrics queue to work off
 *///from   w  ww. j a v a 2  s.com
/**
 * Given a queue of metrics to send, serialize the metrics into the wire format and
 * distribute them across the configured senders, flushing whenever the output buffer
 * would exceed the maximum allowed size.
 *
 * @param metrics the metrics queue to work off
 */
public void processMetrics(LinkedBlockingQueue<String> metrics) {
    int metricCount = metrics.size();
    if (metricCount == 0) {
        return;
    }
    List<String> drainedMetrics = new ArrayList<>();

    metrics.drainTo(drainedMetrics);
    Buffer outputBuffer = Buffer.buffer();

    int senderPos = 0;
    MetricsSender currentSender = metricsSenders.get(senderPos);

    // split the drained metrics roughly evenly across the available senders
    int nextRotateIndex = drainedMetrics.size() / metricsSenders.size();
    int switchInterval = nextRotateIndex + 1;

    // loop through and serialize the metrics and send them as we fill the buffer up to max buffer
    for (int i = 0; i < drainedMetrics.size(); i++) {
        // TODO ponder if one of the host is disconnected and stays that way
        if (i == nextRotateIndex) {
            // flush the current remaining data queued before moving to the next sender
            outputBuffer = write(currentSender, outputBuffer);

            senderPos++;
            currentSender = metricsSenders.get(senderPos);
            nextRotateIndex += switchInterval;
        }

        String metric = drainedMetrics.get(i);
        // use an explicit charset: the no-arg getBytes() depends on the JVM's platform
        // default and can silently corrupt non-ASCII metric data on misconfigured hosts
        byte[] bytes = metric.getBytes(java.nio.charset.StandardCharsets.UTF_8);

        // if this would exceed the max buffer to send go ahead and pass to the sender
        if (bytes.length + outputBuffer.length() > maxBufferSizeInBytes) {
            outputBuffer = write(currentSender, outputBuffer);
        }

        outputBuffer.appendBytes(bytes);
    }

    // send whatever is left in the buffer
    if (outputBuffer.length() > 0) {
        write(currentSender, outputBuffer);
    }
}

From source file:com.cyngn.vertx.opentsdb.service.MetricsProcessor.java

License:Apache License

/**
 * Pushes the buffered data to the supplied sender, reporting any write failure
 * on the event bus, and hands back a fresh empty buffer for further accumulation.
 *
 * @param sender the sender to write to
 * @param data the buffered metric data to send
 * @return a new empty buffer
 */
private Buffer write(MetricsSender sender, Buffer data) {
    if (!sender.write(data)) {
        JsonObject errorPayload = new JsonObject().put("error", EventBusMessage.WRITE_FAILURE.toString());
        bus.send(OpenTsDbService.ERROR_MESSAGE_ADDRESS, errorPayload);
    }
    return Buffer.buffer();
}

From source file:com.englishtown.vertx.jersey.impl.DefaultJerseyHandler.java

License:Open Source License

/**
 * {@inheritDoc}/*from  www .  j  ava2 s.co m*/
 */
@Override
/**
 * {@inheritDoc}
 */
@Override
public void handle(final HttpServerRequest vertxRequest) {

    if (!shouldReadData(vertxRequest)) {
        // no body expected: hand the request straight through with no input stream
        if (logger.isDebugEnabled()) {
            logger.debug("DefaultJerseyHandler - handle request: " + vertxRequest.method() + " "
                    + vertxRequest.uri());
        }
        DefaultJerseyHandler.this.handle(vertxRequest, null);
        return;
    }

    // Wait for the body for jersey to handle form/json/xml params
    if (logger.isDebugEnabled()) {
        logger.debug("DefaultJerseyHandler - handle request and read body: " + vertxRequest.method() + " "
                + vertxRequest.uri());
    }
    final Buffer body = Buffer.buffer();

    // accumulate body chunks, enforcing the configured size ceiling as we go
    vertxRequest.handler(chunk -> {
        body.appendBuffer(chunk);
        if (body.length() > maxBodySize) {
            throw new RuntimeException(
                    "The input stream has exceeded the max allowed body size " + maxBodySize + ".");
        }
    });
    // once the full body has arrived, expose it to jersey as an input stream
    vertxRequest.endHandler(aVoid -> DefaultJerseyHandler.this.handle(vertxRequest,
            new ByteArrayInputStream(body.getBytes())));

}

From source file:com.github.mcollovati.vertx.vaadin.VertxVaadinResponse.java

License:Open Source License

@Override
/**
 * Returns a writer that accumulates character data into the response buffer,
 * sending accumulated data on flush and completing the response on close.
 *
 * @return a PrintWriter backed by the response's output buffer
 * @throws IOException declared for interface compatibility
 */
@Override
public PrintWriter getWriter() throws IOException {
    if (useOOS) {
        throw new IllegalStateException("getOutputStream() has already been called for this response");
    }
    useWriter = true;
    Writer bufferingWriter = new Writer() {
        @Override
        public void write(char[] cbuf, int off, int len) throws IOException {
            // stage the characters in the shared output buffer
            outBuffer.appendString(new String(cbuf, off, len));
        }

        @Override
        public void flush() throws IOException {
            // push everything staged so far and start a fresh buffer
            response.write(outBuffer);
            outBuffer = Buffer.buffer();
        }

        @Override
        public void close() throws IOException {
            // finish the response with whatever is left in the buffer
            response.end(outBuffer);
        }
    };
    return new PrintWriter(bufferingWriter);
}

From source file:com.groupon.vertx.memcache.stream.MemcacheOutputStream.java

License:Apache License

/**
 * Builds a buffered output stream over the given socket.
 *
 * @param socket the socket written to when the buffer is flushed
 * @param maxBuffer maximum number of bytes to accumulate; must be positive
 */
public MemcacheOutputStream(NetSocket socket, final int maxBuffer) {
    if (maxBuffer <= 0) {
        throw new IllegalArgumentException("Invalid buffer size");
    }
    this.maxBuffer = maxBuffer;
    this.socket = socket;
    this.buffer = Buffer.buffer();
}

From source file:com.groupon.vertx.memcache.stream.MemcacheOutputStream.java

License:Apache License

/**
 * Writes any accumulated bytes to the socket and resets the buffer; no-op when empty.
 */
private void flushBuffer() {
    if (buffer.length() == 0) {
        return;
    }
    socket.write(buffer);
    buffer = Buffer.buffer();
}

From source file:com.groupon.vertx.redis.RedisOutputStream.java

License:Apache License

/**
 * Builds a buffered output stream over the given socket.
 *
 * @param socket the socket written to when the buffer is flushed
 * @param maxBuffer maximum number of bytes to accumulate; must be positive
 */
public RedisOutputStream(NetSocket socket, final int maxBuffer) {
    if (maxBuffer <= 0) {
        throw new IllegalArgumentException("Invalid buffer size");
    }
    this.maxBuffer = maxBuffer;
    this.socket = socket;
    this.buffer = Buffer.buffer();
}

From source file:com.hubrick.vertx.s3.client.S3Client.java

License:Apache License

/**
 * Adaptively upload a file to S3 and take away the burden to choose between direct or multipart upload.
 * Since the minimum size of the multipart part has to be 5MB this method handles the upload automatically.
 * It either chooses between the direct upload if the stream contains less then 5MB or the multipart upload
 * if the stream is bigger then 5MB.//  www  .jav  a2s.c  om
 *
 * @param bucket                The bucket
 * @param key                   The key of the final file
 * @param adaptiveUploadRequest The request
 * @param handler               Success handler
 * @param exceptionHandler      Exception handler
 */
/**
 * Adaptively upload a file to S3 and take away the burden to choose between direct or multipart upload.
 * Since the minimum size of the multipart part has to be 5MB this method handles the upload automatically.
 * It either chooses between the direct upload if the stream contains less than 5MB or the multipart upload
 * if the stream is bigger than 5MB.
 *
 * @param bucket                The bucket
 * @param key                   The key of the final file
 * @param adaptiveUploadRequest The request
 * @param handler               Success handler
 * @param exceptionHandler      Exception handler
 */
public void adaptiveUpload(String bucket, String key, AdaptiveUploadRequest adaptiveUploadRequest,
        Handler<Response<CommonResponseHeaders, Void>> handler, Handler<Throwable> exceptionHandler) {
    checkNotNull(StringUtils.trimToNull(bucket), "bucket must not be null");
    checkNotNull(StringUtils.trimToNull(key), "key must not be null");
    checkNotNull(adaptiveUploadRequest, "adaptiveUploadRequest must not be null");
    checkNotNull(handler, "handler must not be null");
    checkNotNull(exceptionHandler, "exceptionHandler must not be null");

    // read the stream in 5MB chunks so the strategy can be decided after the first chunk
    final ChunkedBufferReadStream chunkedBufferReadStream = new ChunkedBufferReadStream(vertx,
            adaptiveUploadRequest.getReadStream(), FIVE_MB_IN_BYTES);
    chunkedBufferReadStream.exceptionHandler(throwable -> exceptionHandler.handle(throwable));
    chunkedBufferReadStream.setChunkHandler(chunk -> {
        // decide only once, when the very first chunk arrives
        if (chunkedBufferReadStream.numberOfChunks() == 0) {
            if (chunk.length() < FIVE_MB_IN_BYTES) {
                // stream fits in a single part: collect the whole body and do a direct PUT
                final Buffer buffer = Buffer.buffer();
                chunkedBufferReadStream.handler(buffer::appendBuffer);
                chunkedBufferReadStream.endHandler(aVoid -> {
                    putObject(bucket, key,
                            mapAdaptiveUploadRequestToPutObjectRequest(buffer, adaptiveUploadRequest),
                            event -> handler.handle(new HeaderOnlyResponse(event.getHeader())),
                            exceptionHandler);
                });
                chunkedBufferReadStream.resume();
            } else {
                // at least 5MB available: pause the stream while the multipart upload is set up
                chunkedBufferReadStream.pause();
                initMultipartUpload(bucket, key,
                        mapAdaptiveUploadRequestToInitMultipartUploadRequest(adaptiveUploadRequest), event -> {
                            try {
                                // propagate optional tuning parameters onto the upload write stream
                                if (adaptiveUploadRequest.getWriteQueueMaxSize() != null) {
                                    event.getData()
                                            .setWriteQueueMaxSize(adaptiveUploadRequest.getWriteQueueMaxSize());
                                }
                                if (adaptiveUploadRequest.getBufferSize() != null) {
                                    event.getData().bufferSize(adaptiveUploadRequest.getBufferSize());
                                }
                                event.getData()
                                        .exceptionHandler(throwable -> exceptionHandler.handle(throwable));
                                // pump the remaining stream into the multipart upload, then finalize it
                                Pump.pump(chunkedBufferReadStream, event.getData()).start();
                                chunkedBufferReadStream
                                        .endHandler(aVoid -> event.getData().end(endResponse -> handler
                                                .handle(new HeaderOnlyResponse(event.getHeader()))));
                                chunkedBufferReadStream.resume();
                            } catch (Throwable t) {
                                exceptionHandler.handle(t);
                            }
                        }, exceptionHandler);
            }
        }
    });
    chunkedBufferReadStream.resume();
}

From source file:com.hubrick.vertx.s3.client.S3ClientRequest.java

License:Apache License

@Override
public void end() {
    initAuthenticationHeader(Buffer.buffer());

    request.end();
    logBody(Buffer.buffer());
}

From source file:com.hubrick.vertx.s3.client.S3ClientRequest.java

License:Apache License

/**
 * Initializes the authentication header over an empty payload so the request
 * body can be streamed afterwards.
 *
 * @throws IllegalStateException if payload signing is enabled — a signed payload
 *         covers the full body hash, so streaming is impossible
 */
protected void initAuthenticationHeaderBeforePayload() {
    if (signPayload) {
        // IllegalStateException (a RuntimeException) better names this misuse of the
        // request's configured state than the raw RuntimeException used before
        throw new IllegalStateException("Can not stream to request with signed payload");
    }
    initAuthenticationHeader(Buffer.buffer());
}