Example usage for com.google.common.io ByteSource empty

Introduction

On this page you can find example usages of com.google.common.io ByteSource.empty().

Prototype

public static ByteSource empty() 

Document

Returns an immutable ByteSource that contains no bytes.
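
As a quick, self-contained sketch (the class name ByteSourceEmptyDemo is purely illustrative), the empty source reports zero bytes from every accessor:

import com.google.common.io.ByteSource;

import java.io.IOException;
import java.io.InputStream;

// Minimal sketch: ByteSource.empty() is an immutable, reusable source containing no bytes.
public class ByteSourceEmptyDemo {
    public static void main(String[] args) throws IOException {
        ByteSource empty = ByteSource.empty();

        System.out.println(empty.isEmpty());      // true
        System.out.println(empty.size());         // 0
        System.out.println(empty.read().length);  // 0

        try (InputStream in = empty.openStream()) {
            System.out.println(in.read());        // -1: end of stream immediately
        }
    }
}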

Usage

From source file:io.druid.client.DirectDruidClient.java

@Override
public Sequence<T> run(final Query<T> query, final Map<String, Object> context) {
    QueryToolChest<T, Query<T>> toolChest = warehouse.getToolChest(query);
    boolean isBySegment = query.getContextBySegment(false);

    Pair<JavaType, JavaType> types = typesMap.get(query.getClass());
    if (types == null) {
        final TypeFactory typeFactory = objectMapper.getTypeFactory();
        JavaType baseType = typeFactory.constructType(toolChest.getResultTypeReference());
        JavaType bySegmentType = typeFactory.constructParametricType(Result.class,
                typeFactory.constructParametricType(BySegmentResultValueClass.class, baseType));
        types = Pair.of(baseType, bySegmentType);
        typesMap.put(query.getClass(), types);
    }

    final JavaType typeRef;
    if (isBySegment) {
        typeRef = types.rhs;
    } else {
        typeRef = types.lhs;
    }

    final ListenableFuture<InputStream> future;
    final String url = String.format("http://%s/druid/v2/", host);
    final String cancelUrl = String.format("http://%s/druid/v2/%s", host, query.getId());

    try {
        log.debug("Querying url[%s]", url);

        final long requestStartTime = System.currentTimeMillis();

        final ServiceMetricEvent.Builder builder = toolChest.makeMetricBuilder(query);
        builder.setDimension("server", host);
        builder.setDimension(DruidMetrics.ID, Strings.nullToEmpty(query.getId()));

        final HttpResponseHandler<InputStream, InputStream> responseHandler = new HttpResponseHandler<InputStream, InputStream>() {
            private long responseStartTime;
            private final AtomicLong byteCount = new AtomicLong(0);
            private final BlockingQueue<InputStream> queue = new LinkedBlockingQueue<>();
            private final AtomicBoolean done = new AtomicBoolean(false);

            @Override
            public ClientResponse<InputStream> handleResponse(HttpResponse response) {
                log.debug("Initial response from url[%s]", url);
                responseStartTime = System.currentTimeMillis();
                emitter.emit(builder.build("query/node/ttfb", responseStartTime - requestStartTime));

                try {
                    final String responseContext = response.headers().get("X-Druid-Response-Context");
                    // context may be null in case of error or query timeout
                    if (responseContext != null) {
                        context.putAll(objectMapper.<Map<String, Object>>readValue(responseContext,
                                new TypeReference<Map<String, Object>>() {
                                }));
                    }
                    queue.put(new ChannelBufferInputStream(response.getContent()));
                } catch (final IOException e) {
                    log.error(e, "Error parsing response context from url [%s]", url);
                    return ClientResponse.<InputStream>finished(new InputStream() {
                        @Override
                        public int read() throws IOException {
                            throw e;
                        }
                    });
                } catch (InterruptedException e) {
                    log.error(e, "Queue appending interrupted");
                    Thread.currentThread().interrupt();
                    throw Throwables.propagate(e);
                }
                byteCount.addAndGet(response.getContent().readableBytes());
                return ClientResponse
                        .<InputStream>finished(new SequenceInputStream(new Enumeration<InputStream>() {
                            @Override
                            public boolean hasMoreElements() {
                                // done stays false until the final (empty) stream has been put in the queue,
                                // so keep returning true while more streams may still arrive or remain queued.
                                synchronized (done) {
                                    return !done.get() || !queue.isEmpty();
                                }
                            }

                            @Override
                            public InputStream nextElement() {
                                try {
                                    return queue.take();
                                } catch (InterruptedException e) {
                                    Thread.currentThread().interrupt();
                                    throw Throwables.propagate(e);
                                }
                            }
                        }));
            }

            @Override
            public ClientResponse<InputStream> handleChunk(ClientResponse<InputStream> clientResponse,
                    HttpChunk chunk) {
                final ChannelBuffer channelBuffer = chunk.getContent();
                final int bytes = channelBuffer.readableBytes();
                if (bytes > 0) {
                    try {
                        queue.put(new ChannelBufferInputStream(channelBuffer));
                    } catch (InterruptedException e) {
                        log.error(e, "Unable to put finalizing input stream into Sequence queue for url [%s]",
                                url);
                        Thread.currentThread().interrupt();
                        throw Throwables.propagate(e);
                    }
                    byteCount.addAndGet(bytes);
                }
                return clientResponse;
            }

            @Override
            public ClientResponse<InputStream> done(ClientResponse<InputStream> clientResponse) {
                long stopTime = System.currentTimeMillis();
                log.debug("Completed request to url[%s] with %,d bytes returned in %,d millis [%,f b/s].", url,
                        byteCount.get(), stopTime - responseStartTime,
                        byteCount.get() / (0.001 * (stopTime - responseStartTime)));
                emitter.emit(builder.build("query/node/time", stopTime - requestStartTime));
                synchronized (done) {
                    try {
                        // An empty stream is put at the end to give SequenceInputStream.close() something to close out
                        // after done is set to true, regardless of the rest of the stream's state.
                        queue.put(ByteSource.empty().openStream());
                    } catch (InterruptedException e) {
                        log.error(e, "Unable to put finalizing input stream into Sequence queue for url [%s]",
                                url);
                        Thread.currentThread().interrupt();
                        throw Throwables.propagate(e);
                    } catch (IOException e) {
                        // This should never happen
                        throw Throwables.propagate(e);
                    } finally {
                        done.set(true);
                    }
                }
                return ClientResponse.<InputStream>finished(clientResponse.getObj());
            }

            @Override
            public void exceptionCaught(final ClientResponse<InputStream> clientResponse, final Throwable e) {
                // Don't wait for lock in case the lock had something to do with the error
                synchronized (done) {
                    done.set(true);
                    // Make a best effort to put a terminal stream (one that rethrows the error) into the queue
                    // in case something is waiting on take(). If nothing is waiting on take(), it will be closed out anyway.
                    queue.offer(new InputStream() {
                        @Override
                        public int read() throws IOException {
                            throw new IOException(e);
                        }
                    });
                }
            }
        };
        future = httpClient.go(new Request(HttpMethod.POST, new URL(url))
                .setContent(objectMapper.writeValueAsBytes(query)).setHeader(HttpHeaders.Names.CONTENT_TYPE,
                        isSmile ? SmileMediaTypes.APPLICATION_JACKSON_SMILE : MediaType.APPLICATION_JSON),
                responseHandler);

        queryWatcher.registerQuery(query, future);

        openConnections.getAndIncrement();
        Futures.addCallback(future, new FutureCallback<InputStream>() {
            @Override
            public void onSuccess(InputStream result) {
                openConnections.getAndDecrement();
            }

            @Override
            public void onFailure(Throwable t) {
                openConnections.getAndDecrement();
                if (future.isCancelled()) {
                    // forward the cancellation to underlying queriable node
                    try {
                        StatusResponseHolder res = httpClient.go(
                                new Request(HttpMethod.DELETE, new URL(cancelUrl))
                                        .setContent(objectMapper.writeValueAsBytes(query))
                                        .setHeader(HttpHeaders.Names.CONTENT_TYPE,
                                                isSmile ? SmileMediaTypes.APPLICATION_JACKSON_SMILE
                                                        : MediaType.APPLICATION_JSON),
                                new StatusResponseHandler(Charsets.UTF_8)).get();
                        if (res.getStatus().getCode() >= 500) {
                            throw new RE("Error cancelling query[%s]: queriable node returned status[%d] [%s].",
                                    query.getId(), res.getStatus().getCode(), res.getStatus().getReasonPhrase());
                        }
                    } catch (IOException | ExecutionException | InterruptedException e) {
                        Throwables.propagate(e);
                    }
                }
            }
        });
    } catch (IOException e) {
        throw Throwables.propagate(e);
    }

    Sequence<T> retVal = new BaseSequence<>(new BaseSequence.IteratorMaker<T, JsonParserIterator<T>>() {
        @Override
        public JsonParserIterator<T> make() {
            return new JsonParserIterator<T>(typeRef, future, url);
        }

        @Override
        public void cleanup(JsonParserIterator<T> iterFromMake) {
            CloseQuietly.close(iterFromMake);
        }
    });

    // bySegment queries are de-serialized after caching results in order to
    // avoid the cost of de-serializing and then re-serializing again when adding to cache
    if (!isBySegment) {
        retVal = Sequences.map(retVal,
                toolChest.makePreComputeManipulatorFn(query, MetricManipulatorFns.deserializing()));
    }

    return retVal;
}
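
The ByteSource.empty() call above (in done()) enqueues an empty stream as a terminator, so the SequenceInputStream assembled in handleResponse() can drain and close cleanly once done is set. A distilled, stand-alone sketch of that pattern (the class QueueBackedStream and its method names are illustrative, not Druid's API) might look like:

import com.google.common.io.ByteSource;

import java.io.IOException;
import java.io.InputStream;
import java.io.SequenceInputStream;
import java.util.Enumeration;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.atomic.AtomicBoolean;

// Chunks arrive on a queue; ByteSource.empty().openStream() is the terminator
// that wakes a blocked take() and lets the SequenceInputStream finish.
public class QueueBackedStream {
    private final BlockingQueue<InputStream> queue = new LinkedBlockingQueue<>();
    private final AtomicBoolean done = new AtomicBoolean(false);

    public void addChunk(InputStream chunk) throws InterruptedException {
        queue.put(chunk);
    }

    public void finish() throws InterruptedException, IOException {
        synchronized (done) {
            // The empty stream gives a blocked take() one final element to consume
            // after done is set, regardless of the rest of the stream's state.
            queue.put(ByteSource.empty().openStream());
            done.set(true);
        }
    }

    public InputStream asInputStream() {
        return new SequenceInputStream(new Enumeration<InputStream>() {
            @Override
            public boolean hasMoreElements() {
                synchronized (done) {
                    return !done.get() || !queue.isEmpty();
                }
            }

            @Override
            public InputStream nextElement() {
                try {
                    return queue.take();
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                    throw new RuntimeException(e);
                }
            }
        });
    }
}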

From source file:org.jooby.internal.SseRenderer.java

public byte[] format(final Sse.Event event) throws Exception {
    // comment?
    data = event.comment().map(comment -> ByteSource.concat(COMMENT, bytes(comment), NL))
            .orElse(ByteSource.empty());

    // id?
    data = event.id().map(id -> ByteSource.concat(data, ID, bytes(id.toString()), NL)).orElse(data);

    // event?
    data = event.name().map(name -> ByteSource.concat(data, EVENT, bytes(name), NL)).orElse(data);

    // retry?
    data = event.retry().map(retry -> ByteSource.concat(data, RETRY, bytes(Long.toString(retry)), NL))
            .orElse(data);

    Optional<Object> value = event.data();
    if (value.isPresent()) {
        render(value.get());
    }

    data = ByteSource.concat(data, NL);

    byte[] bytes = data.read();
    data = null;
    return bytes;
}
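
Here ByteSource.empty() serves as the neutral starting value for ByteSource.concat(), so any absent optional field simply contributes no bytes. A simplified, stand-alone illustration of the same idea (the class name and the local bytes() helper are made up for the example) could be:

import com.google.common.io.ByteSource;

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.Optional;

// Starts from ByteSource.empty() and concatenates only the fields that are present.
public class EmptySeedExample {
    public static void main(String[] args) throws IOException {
        Optional<String> comment = Optional.empty(); // absent: contributes no bytes
        Optional<String> id = Optional.of("42");

        ByteSource data = ByteSource.empty();
        if (comment.isPresent()) {
            data = ByteSource.concat(data, bytes(": " + comment.get() + "\n"));
        }
        if (id.isPresent()) {
            data = ByteSource.concat(data, bytes("id: " + id.get() + "\n"));
        }
        data = ByteSource.concat(data, bytes("\n"));

        System.out.print(new String(data.read(), StandardCharsets.UTF_8)); // "id: 42\n\n"
    }

    private static ByteSource bytes(String s) {
        return ByteSource.wrap(s.getBytes(StandardCharsets.UTF_8));
    }
}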

From source file:org.jclouds.kinetic.strategy.internal.KineticStorageStrategyImpl.java

private Blob getChunkedBlob(final String container, final String key, final long chunkId) {
    BlobBuilder builder = blobBuilders.get();
    builder.name(key);
    File file = getFileForBlobKey(container, key);

    try {
        List<String> chunkKeys = KineticDatabaseUtils.getInstance()
                .getFileChunkKeysFromDatabase(file.getPath());
        byte[] blobData = new byte[0];
        for (String chunkKey : chunkKeys) {
            byte[] data = KineticDatabaseUtils.getInstance().getChunkFromDatabase(chunkKey);
            blobData = ArrayUtils.addAll(blobData, data);
        }

        return this.createBlobFromByteSource(container, key, ByteSource.wrap(blobData));

    } catch (SQLException sqle) {

        ByteSource byteSource;

        if (getDirectoryBlobSuffix(key) != null) {
            logger.debug("%s - %s is a directory", container, key);
            byteSource = ByteSource.empty();
        } else {
            byteSource = Files.asByteSource(file).slice(chunkId, KineticConstants.PROPERTY_CHUNK_SIZE_BYTES
                    - KineticConstants.PROPERTY_CHUNK_FULL_HEADER_SIZE_BYTES);
        }

        return this.createBlobFromByteSource(container, key, byteSource);
    }
}

From source file:org.jclouds.blobstore.config.LocalBlobStore.java

@Override
public MultipartUpload initiateMultipartUpload(String container, BlobMetadata blobMetadata,
        PutOptions options) {
    String uploadId = UUID.randomUUID().toString();
    // create a stub blob
    Blob blob = blobBuilder(MULTIPART_PREFIX + uploadId + "-" + blobMetadata.getName() + "-stub")
            .payload(ByteSource.empty()).build();
    putBlob(container, blob);
    return MultipartUpload.create(container, blobMetadata.getName(), uploadId, blobMetadata, options);
}
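
The zero-byte stub idiom above works against any jclouds BlobStore. A hedged sketch against jclouds' in-memory "transient" provider (the container and blob names are made up, and it assumes jclouds-blobstore is on the classpath) might be:

import org.jclouds.ContextBuilder;
import org.jclouds.blobstore.BlobStore;
import org.jclouds.blobstore.BlobStoreContext;
import org.jclouds.blobstore.domain.Blob;

import com.google.common.io.ByteSource;

import java.io.IOException;

// Sketch only: stores a zero-byte "stub" blob, mirroring the multipart stub above.
public class EmptyStubBlobDemo {
    public static void main(String[] args) throws IOException {
        BlobStoreContext context = ContextBuilder.newBuilder("transient")
                .buildView(BlobStoreContext.class);
        try {
            BlobStore blobStore = context.getBlobStore();
            blobStore.createContainerInLocation(null, "demo-container");

            Blob stub = blobStore.blobBuilder("upload-stub")
                    .payload(ByteSource.empty())
                    .contentLength(0)
                    .build();
            blobStore.putBlob("demo-container", stub);
        } finally {
            context.close();
        }
    }
}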

From source file:org.gaul.s3proxy.S3ProxyHandler.java

private void handleInitiateMultipartUpload(HttpServletRequest request, HttpServletResponse response,
        BlobStore blobStore, String containerName, String blobName) throws IOException {
    ByteSource payload = ByteSource.empty();
    BlobBuilder.PayloadBlobBuilder builder = blobStore.blobBuilder(blobName).payload(payload);
    addContentMetdataFromHttpRequest(builder, request);
    builder.contentLength(payload.size());
    Blob blob = builder.build();

    // S3 requires blob metadata during the initiate call while Azure and
    // Swift require it in the complete call.  Store a stub blob which
    // allows reproducing this metadata later.
    blobStore.putBlob(containerName, blob);

    MultipartUpload mpu = blobStore.initiateMultipartUpload(containerName, blob.getMetadata());

    try (Writer writer = response.getWriter()) {
        XMLStreamWriter xml = xmlOutputFactory.createXMLStreamWriter(writer);
        xml.writeStartDocument();
        xml.writeStartElement("InitiateMultipartUploadResult");
        xml.writeDefaultNamespace(AWS_XMLNS);

        writeSimpleElement(xml, "Bucket", containerName);
        writeSimpleElement(xml, "Key", blobName);
        writeSimpleElement(xml, "UploadId", mpu.id());

        xml.writeEndElement();
        xml.flush();
    } catch (XMLStreamException xse) {
        throw new IOException(xse);
    }
}