Example usage for java.nio.channels Channels newInputStream

Introduction

On this page you can find usage examples for java.nio.channels.Channels.newInputStream, collected from real-world projects.

Prototype

public static InputStream newInputStream(ReadableByteChannel ch)
public static InputStream newInputStream(AsynchronousByteChannel ch)

Document

Constructs a stream that reads bytes from the given channel.
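
As a minimal, self-contained sketch of the ReadableByteChannel overload (the one used by every example below), the snippet opens a FileChannel and wraps it with Channels.newInputStream so that stream-oriented APIs can consume the channel. Per the Javadoc, the returned stream is unbuffered, and closing it closes the underlying channel. The file name example.txt is a placeholder.

import java.io.IOException;
import java.io.InputStream;
import java.nio.channels.Channels;
import java.nio.channels.FileChannel;
import java.nio.file.Paths;
import java.nio.file.StandardOpenOption;

public class NewInputStreamExample {
    public static void main(String[] args) throws IOException {
        // FileChannel implements ReadableByteChannel, so it can be adapted to an
        // InputStream; closing the stream closes the underlying channel as well.
        try (InputStream in = Channels.newInputStream(
                FileChannel.open(Paths.get("example.txt"), StandardOpenOption.READ))) {
            int b;
            while ((b = in.read()) != -1) {
                System.out.print((char) b);
            }
        }
    }
}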

Usage

From source file:org.apache.beam.sdk.io.AvroSource.java

/**
 * Reads the {@link AvroMetadata} from the header of an Avro file.
 *
 * <p>This method parses the header of an Avro
 * <a href="https://avro.apache.org/docs/1.7.7/spec.html#Object+Container+Files">
 * Object Container File</a>.
 *
 * @throws IOException if the file is in an invalid format.
 */
@VisibleForTesting
static AvroMetadata readMetadataFromFile(ResourceId fileResource) throws IOException {
    String codec = null;
    String schemaString = null;
    byte[] syncMarker;
    try (InputStream stream = Channels.newInputStream(FileSystems.open(fileResource))) {
        BinaryDecoder decoder = DecoderFactory.get().binaryDecoder(stream, null);

        // The header of an object container file begins with a four-byte magic number, followed
        // by the file metadata (including the schema and codec), encoded as a map. Finally, the
        // header ends with the file's 16-byte sync marker.
        // See https://avro.apache.org/docs/1.7.7/spec.html#Object+Container+Files for details on
        // the encoding of container files.

        // Read the magic number.
        byte[] magic = new byte[DataFileConstants.MAGIC.length];
        decoder.readFixed(magic);
        if (!Arrays.equals(magic, DataFileConstants.MAGIC)) {
            throw new IOException("Missing Avro file signature: " + fileResource);
        }

        // Read the metadata to find the codec and schema.
        ByteBuffer valueBuffer = ByteBuffer.allocate(512);
        long numRecords = decoder.readMapStart();
        while (numRecords > 0) {
            for (long recordIndex = 0; recordIndex < numRecords; recordIndex++) {
                String key = decoder.readString();
                // readBytes() clears the buffer and returns a buffer where:
                // - position is the start of the bytes read
                // - limit is the end of the bytes read
                valueBuffer = decoder.readBytes(valueBuffer);
                byte[] bytes = new byte[valueBuffer.remaining()];
                valueBuffer.get(bytes);
                if (key.equals(DataFileConstants.CODEC)) {
                    codec = new String(bytes, "UTF-8");
                } else if (key.equals(DataFileConstants.SCHEMA)) {
                    schemaString = new String(bytes, "UTF-8");
                }
            }
            numRecords = decoder.mapNext();
        }
        if (codec == null) {
            codec = DataFileConstants.NULL_CODEC;
        }

        // Finally, read the sync marker.
        syncMarker = new byte[DataFileConstants.SYNC_SIZE];
        decoder.readFixed(syncMarker);
    }
    checkState(schemaString != null, "No schema present in Avro file metadata %s", fileResource);
    return new AvroMetadata(syncMarker, codec, schemaString);
}
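
Note on the pattern above: because the InputStream returned by Channels.newInputStream closes its underlying channel when the stream itself is closed, the try-with-resources block on the stream alone is enough to release the channel obtained from FileSystems.open.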

From source file:com.metamx.druid.indexing.coordinator.ForkingTaskRunner.java

@Override
public Optional<InputSupplier<InputStream>> streamTaskLog(final String taskid, final long offset) {
    final ProcessHolder processHolder;

    synchronized (tasks) {
        final ForkingTaskRunnerWorkItem taskWorkItem = tasks.get(taskid);
        if (taskWorkItem != null && taskWorkItem.processHolder != null) {
            processHolder = taskWorkItem.processHolder;
        } else {
            return Optional.absent();
        }
    }

    return Optional.<InputSupplier<InputStream>>of(new InputSupplier<InputStream>() {
        @Override
        public InputStream getInput() throws IOException {
            final RandomAccessFile raf = new RandomAccessFile(processHolder.logFile, "r");
            final long rafLength = raf.length();
            if (offset > 0) {
                raf.seek(offset);
            } else if (offset < 0 && -offset < rafLength) {
                // A negative offset means "read the last |offset| bytes".
                raf.seek(rafLength + offset);
            }
            return Channels.newInputStream(raf.getChannel());
        }
    });
}
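
The stream returned by Channels.newInputStream begins reading at the channel's current position, so the seek calls above implement the offset contract: a positive offset skips that many bytes from the start of the log, while a negative offset serves roughly the last |offset| bytes of the file.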

From source file:com.norconex.commons.lang.io.CachedInputStream.java

@SuppressWarnings("resource")
private void createInputStreamFromCache() throws FileNotFoundException {
    if (fileCache != null) {
        LOG.debug("Creating new input stream from file cache.");
        RandomAccessFile f = new RandomAccessFile(fileCache, "r");
        FileChannel channel = f.getChannel();
        inputStream = Channels.newInputStream(channel);
    } else {
        LOG.debug("Creating new input stream from memory cache.");
        inputStream = new ByteArrayInputStream(memCache);
    }
    needNewStream = false;
}

From source file:org.polago.deployconf.DeployConfRunner.java

/**
 * Gets a DeploymentConfig instance from a Path.
 *
 * @param path the file to use
 * @return a DeploymentConfig representation of the given Path
 * @throws Exception indicating an error
 */
private DeploymentConfig getDeploymentConfigFromPath(Path path) throws Exception {
    // try-with-resources ensures the channel is closed even if parsing fails
    try (ReadableByteChannel ch = FileChannel.open(path, StandardOpenOption.READ)) {
        InputStream is = Channels.newInputStream(ch);
        DeploymentReader reader = new DeploymentReader(is, getGroupManager());
        return reader.parse();
    }
}

From source file:com.linkedin.databus.client.netty.NettyHttpDatabusRelayConnection.java

@Override
public void finishResponse() throws Exception {
    super.finishResponse();
    if (_errorHandled) {
        return;
    }

    final String sourcesResponseError = "/sources response error: ";
    try {
        String exceptionName = RemoteExceptionHandler.getExceptionName(_decorated);
        if (null != exceptionName) {
            LOG.error(sourcesResponseError + RemoteExceptionHandler.getExceptionMessage(_decorated));
            _stateReuse.switchToSourcesResponseError();
        } else {
            String hostHdr = DbusConstants.UNKNOWN_HOST;
            String svcHdr = DbusConstants.UNKNOWN_SERVICE_ID;
            if (null != getParent()) {
                hostHdr = getParent().getRemoteHost();
                svcHdr = getParent().getRemoteService();
                LOG.info("initiated sesssion to host " + hostHdr + " service " + svcHdr);
            }

            InputStream bodyStream = Channels.newInputStream(_decorated);
            ObjectMapper mapper = new ObjectMapper();

            List<IdNamePair> sources = mapper.readValue(bodyStream, new TypeReference<List<IdNamePair>>() {
            });
            _stateReuse.switchToSourcesSuccess(sources, hostHdr, svcHdr);
        }
    } catch (IOException ex) {
        LOG.error(sourcesResponseError, ex);
        _stateReuse.switchToSourcesResponseError();
    } catch (RuntimeException ex) {
        LOG.error(sourcesResponseError, ex);
        _stateReuse.switchToSourcesResponseError();
    }

    _callback.enqueueMessage(_stateReuse);
}

From source file:com.linkedin.databus.client.netty.NettyHttpDatabusRelayConnection.java

@Override
public void finishResponse() throws Exception {
    super.finishResponse();
    if (_errorHandled) {
        return;
    }

    final String registerResponseError = "/register response error: ";
    try {
        String exceptionName = RemoteExceptionHandler.getExceptionName(_decorated);
        if (null != exceptionName) {
            LOG.error(registerResponseError + RemoteExceptionHandler.getExceptionMessage(_decorated));
            _stateReuse.switchToRegisterResponseError();
        } else {
            InputStream bodyStream = Channels.newInputStream(_decorated);
            String bodyStr = IOUtils.toString(bodyStream, Charset.defaultCharset().name());
            IOUtils.closeQuietly(bodyStream);
            if (NettyHttpDatabusRelayConnection.needCompress) {
                try {
                    bodyStr = CompressUtil.uncompress(bodyStr);
                } catch (Exception e) {
                    // Ignored: the stream may simply not be compressed.
                }
            }

            ObjectMapper mapper = new ObjectMapper();
            int registerResponseVersion = 3; // either 2 or 3 would suffice here; we care only about 4

            if (_registerResponseVersionHdr != null) {
                try {
                    registerResponseVersion = Integer.parseInt(_registerResponseVersionHdr);
                } catch (NumberFormatException e) {
                    throw new RuntimeException("Could not parse /register response protocol version: "
                            + _registerResponseVersionHdr);
                }
                if (registerResponseVersion < 2 || registerResponseVersion > 4) {
                    throw new RuntimeException(
                            "Out-of-range /register response protocol version: " + _registerResponseVersionHdr);
                }
            }

            if (registerResponseVersion == 4) // DDSDBUS-2009
            {
                HashMap<String, List<Object>> responseMap = mapper.readValue(bodyStr,
                        new TypeReference<HashMap<String, List<Object>>>() {
                        });

                // Look for mandatory SOURCE_SCHEMAS_KEY.
                Map<Long, List<RegisterResponseEntry>> sourcesSchemasMap = RegisterResponseEntry
                        .createFromResponse(responseMap, RegisterResponseEntry.SOURCE_SCHEMAS_KEY, false);
                // Look for optional KEY_SCHEMAS_KEY
                // Key schemas, if they exist, should correspond to source schemas, but it's not
                // a one-to-one mapping.  The same version of a key schema may be used for several
                // versions of a source schema, or vice versa.  (The IDs must correspond.)
                //
                // TODO (DDSDBUS-xxx):  support key schemas on the relay side, too
                Map<Long, List<RegisterResponseEntry>> keysSchemasMap = RegisterResponseEntry
                        .createFromResponse(responseMap, RegisterResponseEntry.KEY_SCHEMAS_KEY, true);

                // Look for optional METADATA_SCHEMAS_KEY
                List<RegisterResponseMetadataEntry> metadataSchemasList = RegisterResponseMetadataEntry
                        .createFromResponse(responseMap, RegisterResponseMetadataEntry.METADATA_SCHEMAS_KEY,
                                true);

                _stateReuse.switchToRegisterSuccess(sourcesSchemasMap, keysSchemasMap, metadataSchemasList);
            } else // version 2 or 3
            {
                List<RegisterResponseEntry> schemasList = mapper.readValue(bodyStr,
                        new TypeReference<List<RegisterResponseEntry>>() {
                        });

                Map<Long, List<RegisterResponseEntry>> sourcesSchemasMap = RegisterResponseEntry
                        .convertSchemaListToMap(schemasList);

                _stateReuse.switchToRegisterSuccess(sourcesSchemasMap, null, null);
            }
        }
    } catch (IOException ex) {
        LOG.error(registerResponseError, ex);
        _stateReuse.switchToRegisterResponseError();
    } catch (RuntimeException ex) {
        LOG.error(registerResponseError, ex);
        _stateReuse.switchToRegisterResponseError();
    }

    _callback.enqueueMessage(_stateReuse);
}

From source file:io.minio.MinioClient.java

/**
 * Creates Request object for given request parameters.
 *
 * @param method         HTTP method.
 * @param bucketName     Bucket name.
 * @param objectName     Object name in the bucket.
 * @param region         Amazon S3 region of the bucket.
 * @param headerMap      Map of HTTP headers for the request.
 * @param queryParamMap  Map of HTTP query parameters of the request.
 * @param contentType    Content type of the request body.
 * @param body           HTTP request body.
 * @param length         Length of HTTP request body.
 */
private Request createRequest(Method method, String bucketName, String objectName, String region,
        Map<String, String> headerMap, Map<String, String> queryParamMap, final String contentType,
        final Object body, final int length)
        throws InvalidBucketNameException, NoSuchAlgorithmException, InsufficientDataException, IOException {
    if (bucketName == null && objectName != null) {
        throw new InvalidBucketNameException(NULL_STRING, "null bucket name for object '" + objectName + "'");
    }

    HttpUrl.Builder urlBuilder = this.baseUrl.newBuilder();

    if (bucketName != null) {
        checkBucketName(bucketName);

        String host = this.baseUrl.host();
        if (host.equals(S3_AMAZONAWS_COM)) {
            // special case: handle s3.amazonaws.com separately
            if (region != null) {
                host = AwsS3Endpoints.INSTANCE.endpoint(region);
            }

            boolean usePathStyle = false;
            if (method == Method.PUT && objectName == null && queryParamMap == null) {
                // use path style for make bucket to workaround "AuthorizationHeaderMalformed" error from s3.amazonaws.com
                usePathStyle = true;
            } else if (queryParamMap != null && queryParamMap.containsKey("location")) {
                // use path style for location query
                usePathStyle = true;
            } else if (bucketName.contains(".") && this.baseUrl.isHttps()) {
                // use path style where '.' in bucketName causes SSL certificate validation error
                usePathStyle = true;
            }

            if (usePathStyle) {
                urlBuilder.host(host);
                urlBuilder.addPathSegment(bucketName);
            } else {
                urlBuilder.host(bucketName + "." + host);
            }
        } else {
            urlBuilder.addPathSegment(bucketName);
        }
    }

    if (objectName != null) {
        for (String pathSegment : objectName.split("/")) {
            // Limitations:
            // 1. OkHttp does not allow adding '.' or '..' as a path segment.
            // 2. Path segments such as '/', '//', '/usr' or 'usr/' are not allowed either.
            urlBuilder.addPathSegment(pathSegment);
        }
    }

    if (queryParamMap != null) {
        for (Map.Entry<String, String> entry : queryParamMap.entrySet()) {
            urlBuilder.addQueryParameter(entry.getKey(), entry.getValue());
        }
    }

    RequestBody requestBody = null;
    if (body != null) {
        requestBody = new RequestBody() {
            @Override
            public MediaType contentType() {
                if (contentType != null) {
                    return MediaType.parse(contentType);
                } else {
                    return MediaType.parse("application/octet-stream");
                }
            }

            @Override
            public long contentLength() {
                if (body instanceof InputStream || body instanceof RandomAccessFile || body instanceof byte[]) {
                    return length;
                }

                if (length == 0) {
                    return -1;
                } else {
                    return length;
                }
            }

            @Override
            public void writeTo(BufferedSink sink) throws IOException {
                if (body instanceof InputStream) {
                    InputStream stream = (InputStream) body;
                    sink.write(Okio.source(stream), length);
                } else if (body instanceof RandomAccessFile) {
                    RandomAccessFile file = (RandomAccessFile) body;
                    sink.write(Okio.source(Channels.newInputStream(file.getChannel())), length);
                } else if (body instanceof byte[]) {
                    byte[] data = (byte[]) body;
                    sink.write(data, 0, length);
                } else {
                    sink.writeUtf8(body.toString());
                }
            }
        };
    }

    HttpUrl url = urlBuilder.build();
    // urlBuilder does not encode some characters properly for Amazon S3.
    // Encode such characters properly here.
    List<String> pathSegments = url.encodedPathSegments();
    urlBuilder = url.newBuilder();
    for (int i = 0; i < pathSegments.size(); i++) {
        urlBuilder.setEncodedPathSegment(i,
                pathSegments.get(i).replaceAll("\\!", "%21").replaceAll("\\$", "%24").replaceAll("\\&", "%26")
                        .replaceAll("\\'", "%27").replaceAll("\\(", "%28").replaceAll("\\)", "%29")
                        .replaceAll("\\*", "%2A").replaceAll("\\+", "%2B").replaceAll("\\,", "%2C")
                        .replaceAll("\\:", "%3A").replaceAll("\\;", "%3B").replaceAll("\\=", "%3D")
                        .replaceAll("\\@", "%40").replaceAll("\\[", "%5B").replaceAll("\\]", "%5D"));
    }
    url = urlBuilder.build();

    Request.Builder requestBuilder = new Request.Builder();
    requestBuilder.url(url);
    requestBuilder.method(method.toString(), requestBody);
    if (headerMap != null) {
        for (Map.Entry<String, String> entry : headerMap.entrySet()) {
            requestBuilder.header(entry.getKey(), entry.getValue());
        }
    }

    String sha256Hash = null;
    String md5Hash = null;
    if (this.accessKey != null && this.secretKey != null) {
        // No need to compute sha256 if endpoint scheme is HTTPS. Issue #415.
        if (url.isHttps()) {
            sha256Hash = "UNSIGNED-PAYLOAD";
            if (body instanceof BufferedInputStream) {
                md5Hash = Digest.md5Hash((BufferedInputStream) body, length);
            } else if (body instanceof RandomAccessFile) {
                md5Hash = Digest.md5Hash((RandomAccessFile) body, length);
            } else if (body instanceof byte[]) {
                byte[] data = (byte[]) body;
                md5Hash = Digest.md5Hash(data, length);
            }
        } else {
            if (body == null) {
                sha256Hash = Digest.sha256Hash(new byte[0]);
            } else {
                if (body instanceof BufferedInputStream) {
                    String[] hashes = Digest.sha256md5Hashes((BufferedInputStream) body, length);
                    sha256Hash = hashes[0];
                    md5Hash = hashes[1];
                } else if (body instanceof RandomAccessFile) {
                    String[] hashes = Digest.sha256md5Hashes((RandomAccessFile) body, length);
                    sha256Hash = hashes[0];
                    md5Hash = hashes[1];
                } else if (body instanceof byte[]) {
                    byte[] data = (byte[]) body;
                    sha256Hash = Digest.sha256Hash(data, length);
                    md5Hash = Digest.md5Hash(data, length);
                } else {
                    sha256Hash = Digest.sha256Hash(body.toString());
                }
            }
        }
    }

    if (md5Hash != null) {
        requestBuilder.header("Content-MD5", md5Hash);
    }
    if (url.port() == 80 || url.port() == 443) {
        requestBuilder.header("Host", url.host());
    } else {
        requestBuilder.header("Host", url.host() + ":" + url.port());
    }
    requestBuilder.header("User-Agent", this.userAgent);
    if (sha256Hash != null) {
        requestBuilder.header("x-amz-content-sha256", sha256Hash);
    }
    DateTime date = new DateTime();
    requestBuilder.header("x-amz-date", date.toString(DateFormat.AMZ_DATE_FORMAT));

    return requestBuilder.build();
}
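
In writeTo above, a RandomAccessFile body is streamed by chaining adapters: file.getChannel() yields a ReadableByteChannel, Channels.newInputStream wraps it as an InputStream, and Okio.source turns that into a Source from which OkHttp writes exactly length bytes, so the file never has to be loaded into memory.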