Example usage for java.io PipedInputStream PipedInputStream

Introduction

On this page you can find example usage for the java.io.PipedInputStream constructor PipedInputStream(int pipeSize).

Prototype

public PipedInputStream(int pipeSize) 

Document

Creates a PipedInputStream so that it is not yet connected (see connect(java.io.PipedOutputStream)) and uses the specified pipe size for the pipe's buffer.
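
For a sense of how this constructor behaves on its own, here is a minimal self-contained sketch (class and variable names are illustrative, not taken from the examples below): the stream starts out unconnected, is connected explicitly, and is fed from a second thread, since piped streams are designed to be used by two threads.

import java.io.IOException;
import java.io.PipedInputStream;
import java.io.PipedOutputStream;

public class PipeSizeDemo {
    public static void main(String[] args) throws IOException {
        // Create an unconnected PipedInputStream with a 4 KiB internal buffer.
        PipedInputStream in = new PipedInputStream(4096);
        PipedOutputStream out = new PipedOutputStream();
        in.connect(out); // required before any read, since the stream starts unconnected

        // Write from a separate thread; a single thread can deadlock
        // once the pipe's buffer fills up.
        Thread writer = new Thread(() -> {
            try {
                out.write("hello pipe".getBytes());
                out.close();
            } catch (IOException e) {
                e.printStackTrace();
            }
        });
        writer.start();

        int b;
        while ((b = in.read()) != -1) {
            System.out.print((char) b);
        }
        in.close();
    }
}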

Usage

From source file:org.apache.flink.runtime.operators.util.OutputEmitterTest.java

@Test
public void testWrongKeyClass() {

    // Create a comparator that expects a DoubleValue key in field 0
    @SuppressWarnings("unchecked")
    final TypeComparator<Record> doubleComp = new RecordComparatorFactory(new int[] { 0 },
            new Class[] { DoubleValue.class }).createComparator();
    final ChannelSelector<SerializationDelegate<Record>> oe1 = new OutputEmitter<Record>(
            ShipStrategyType.PARTITION_HASH, doubleComp);
    final SerializationDelegate<Record> delegate = new SerializationDelegate<Record>(
            new RecordSerializerFactory().getSerializer());

    PipedInputStream pipedInput = new PipedInputStream(1024 * 1024);
    DataInputStream in = new DataInputStream(pipedInput);
    DataOutputStream out;
    Record rec = null;

    try {
        out = new DataOutputStream(new PipedOutputStream(pipedInput));

        rec = new Record(1);
        rec.setField(0, new IntValue());

        rec.write(new OutputViewDataOutputStreamWrapper(out));
        rec = new Record();
        rec.read(new InputViewDataInputStreamWrapper(in));

    } catch (IOException e) {
        fail("Test erroneous");
    }

    try {
        delegate.setInstance(rec);
        oe1.selectChannels(delegate, 100);
    } catch (DeserializationException re) {
        return;
    }
    Assert.fail("Expected a DeserializationException.");
}

From source file:org.apache.tajo.cli.tsql.TestTajoCli.java

@Test
public void testRunWhenError() throws Exception {
    Thread t = new Thread() {
        public void run() {
            try {
                PipedOutputStream po = new PipedOutputStream();
                InputStream is = new PipedInputStream(po);
                ByteArrayOutputStream out = new ByteArrayOutputStream();

                TajoConf tajoConf = new TajoConf();
                setVar(tajoCli, SessionVars.CLI_FORMATTER_CLASS, TajoCliOutputTestFormatter.class.getName());
                Properties connParams = new Properties();
                connParams.setProperty(ClientParameters.RETRY, "3");
                TajoCli tc = new TajoCli(tajoConf, new String[] {}, connParams, is, out, err);

                tc.executeMetaCommand("\\set ON_ERROR_STOP false");
                assertSessionVar(tc, SessionVars.ON_ERROR_STOP.keyname(), "false");

                po.write("asdf;\nqwe;\nzxcv;\n".getBytes());

                tc.runShell();
            } catch (Exception e) {
                throw new RuntimeException("Cannot run thread in testRunWhenError", e);
            }
        }
    };

    t.start();
    Thread.sleep(1000);
    if (!t.isAlive()) {
        fail("TSQL should be alive");
    } else {
        t.interrupt();
        t.join();
    }
}

From source file:org.mobicents.slee.resource.tftp.TFTPTransfer.java

protected InputStream getInputStream() throws IOException {
    if (!isWrite())
        throw new IOException("No write request pending");
    if (sbbIs == null && os_ == null) {
        os_ = new PipedOutputStream();
        sbbIs = new PipedInputStream((PipedOutputStream) os_);
        resume();
    }
    return sbbIs;
}

From source file:org.mobicents.slee.resource.tftp.TFTPTransfer.java

protected OutputStream getOutputStream() throws IOException {
    if (!isRead())
        throw new IOException("No read request pending");
    if (sbbOs == null && is_ == null) {
        sbbOs = new PipedOutputStream();
        is_ = new PipedInputStream((PipedOutputStream) sbbOs);
        resume();
    }
    return sbbOs;
}

From source file:org.gradle.integtests.fixtures.executer.AbstractGradleExecuter.java

public InputStream connectStdIn() {
    try {
        return stdinPipe == null ? SafeStreams.emptyInput() : new PipedInputStream(stdinPipe);
    } catch (IOException e) {
        throw UncheckedException.throwAsUncheckedException(e);
    }
}

From source file:com.heliosapm.tsdblite.json.JSON.java

/**
 * Serializes the passed object and pipes the result back through an InputStream.
 * Spawns a thread to run the pipe-out side so the calling thread only needs to read the returned input stream.
 * If the serialization fails, the worker thread will close the input stream to signal the failure.
 * @param obj The object to serialize
 * @return an InputStream to read back the JSON serialized object
 */
public static InputStream serializeLoopBack(final Object obj) {
    if (obj == null)
        throw new IllegalArgumentException("The passed object was null");
    try {
        final PipedInputStream pin = new PipedInputStream(2048);
        final PipedOutputStream pout = new PipedOutputStream(pin);
        final Thread t = new Thread("serializeLoopBackThread") {
            @Override
            public void run() {
                try {
                    serialize(obj, pout);
                } catch (Exception ex) {
                    try {
                        pin.close();
                    } catch (Exception x) {
                        /* No Op */
                    }
                }
            }
        };
        t.setDaemon(true);
        t.start();
        return pin;
    } catch (Exception ex) {
        throw new RuntimeException("Failed to pipe serialized object", ex);
    }
}
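
A hedged caller-side sketch of the loopback pattern above (myObject stands in for any value the class's own serialize(Object, OutputStream) accepts; assumes the usual java.io and java.nio.charset imports):

    // If serialization fails, the worker thread closes the pipe and the
    // reads below surface that as an IOException ("Pipe closed").
    InputStream in = JSON.serializeLoopBack(myObject);
    try (BufferedReader reader = new BufferedReader(new InputStreamReader(in, StandardCharsets.UTF_8))) {
        String line;
        while ((line = reader.readLine()) != null) {
            System.out.println(line);
        }
    }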

From source file:com.github.vatbub.awsvpnlauncher.Main.java

/**
 * Copies {@code System.in} to a new {@code InputStream}. Filters {@code CrLf}s ({@code \r\n} in Java) out and replaces them with a single {@code Lf} ({@code \n} in Java).
 *
 * @return The {@code InputStream} to which the filtered contents are forwarded.
 * @throws IOException If {@code System.in} cannot be read for any reason
 */
private static InputStream copyAndFilterInputStream() throws IOException {
    PipedOutputStream forwardTo = new PipedOutputStream();
    PipedInputStream res = new PipedInputStream(forwardTo);
    Thread pipeThread = new Thread(() -> {
        while (true) {
            try {
                int ch = System.in.read();
                if (ch == -1) {
                    // end of System.in reached, stop forwarding
                    break;
                }
                if (ch != '\r' && !SystemUtils.IS_OS_MAC) {
                    forwardTo.write(ch);
                }
            } catch (IOException e) {
                FOKLogger.log(Main.class.getName(), Level.SEVERE,
                        "Stopped forwarding System in due to an exception", e);
                break;
            }
        }
    });
    pipeThread.setName("pipeThread");
    pipeThread.start();

    return res;
}
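
A hedged usage sketch (the Scanner-based consumer is illustrative, not from the original source; assumes java.util.Scanner is imported): because CrLf pairs are collapsed to a single \n, the returned stream is safe for consumers that expect Unix-style line endings.

    InputStream unixStdIn = copyAndFilterInputStream();
    Scanner scanner = new Scanner(unixStdIn);
    while (scanner.hasNextLine()) {
        // Each line arrives without \r, regardless of the platform's console.
        System.out.println("forwarding: " + scanner.nextLine());
    }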

From source file:ch.iterate.openstack.swift.Client.java

/**
 * @param region             The region in which the container is located
 * @param container          The name of the container
 * @param name               The name of the object
 * @param entity             The request entity (make sure to set the Content-Type)
 * @param metadata           The metadata for the object
 * @param md5sum             The 32 character hex encoded MD5 sum of the data
 * @param objectSize         The total size in bytes of the object to be stored
 * @param segmentSize        Optional size in bytes of the object segments to be stored (forces large object support); default 4G
 * @param dynamicLargeObject Optional setting to use dynamic large objects; False/null will use static large objects if required
 * @param segmentContainer   Optional name of the container to store file segments; defaults to storing chunks in the same container in which the file will appear
 * @param segmentFolder      Optional name of the folder for storing file segments; defaults to ".chunks/"
 * @param leaveSegments      Optional setting to leave segments of large objects in place when the manifest is overwritten/changed
 * @return The ETAG if the save was successful, null otherwise
 * @throws GenericException There was a protocol level error talking to CloudFiles
 */
public String storeObject(Region region, String container, String name, HttpEntity entity,
        Map<String, String> metadata, String md5sum, Long objectSize, Long segmentSize,
        Boolean dynamicLargeObject, String segmentContainer, String segmentFolder, Boolean leaveSegments)
        throws IOException, InterruptedException {
    /*
     * Default values for large object support. We also use the defaults combined with the inputs
     * to determine whether to store as a large object.
     */

    /*
     * The maximum size of a single object (5GiB).
     */
    long singleObjectSizeLimit = (long) (5 * Math.pow(1024, 3));

    /*
     * The default minimum segment size (1MiB).
     */
    long minSegmentSize = 1024L * 1024L;

    /*
     * Set the segment size.
     *
     * Defaults to 4GiB segments, and will not permit smaller than 1MiB segments.
     */
    long actualSegmentSize = (segmentSize == null) ? (long) (4 * Math.pow(1024, 3))
            : Math.max(segmentSize, minSegmentSize);

    /*
     * Determines if we will store using large objects - we may do this for 3 reasons:
     *
     *  - A segmentSize has been specified and the object size is greater than the minimum segment size
     *  - If an objectSize is provided and is larger than the single object size limit of 5GiB
     *  - A segmentSize has been specified, but no objectSize given (we take this as a request for segmentation)
     *
     * The last case may fail if the user does not provide at least as much data as the minimum segment
     * size configured on the server, and will always produce a large object structure (even if only one
     * small segment is required).
     */
    objectSize = (objectSize == null) ? -1 : objectSize;
    boolean useLargeObject = ((segmentSize != null) && (objectSize > actualSegmentSize))
            || (objectSize > singleObjectSizeLimit) || ((segmentSize != null) && (objectSize == -1));
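    /*
     * Illustrative numbers: objectSize = 6 GiB with segmentSize = null exceeds
     * the 5 GiB single-object limit, so useLargeObject is true, and the default
     * 4 GiB actualSegmentSize splits the upload into a 4 GiB and a 2 GiB segment.
     */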

    if (!useLargeObject) {
        return storeObject(region, container, name, entity, metadata, md5sum);
    } else {
        /*
         * We need to upload a large object as defined by the method
         * parameters. For now this is done sequentially, but a parallel
         * version using appropriate random access to the underlying data
         * may be desirable.
         *
         * We make the assumption that the given file size will not be
         * greater than int.MAX_VALUE * segmentSize
         *
         */
        leaveSegments = (leaveSegments == null) ? Boolean.FALSE : leaveSegments;
        dynamicLargeObject = (dynamicLargeObject == null) ? Boolean.FALSE : dynamicLargeObject;
        segmentFolder = (segmentFolder == null) ? ".file-segments" : segmentFolder;
        segmentContainer = (segmentContainer == null) ? container : segmentContainer;

        Map<String, List<StorageObject>> oldSegmentsToRemove = null;

        /*
         * If we have chosen not to leave existing large object segments in place (default)
         * then we need to collect information about any existing file segments so that we can
         * deal with them after we complete the upload of the new manifest.
         *
         * We should only delete existing segments after a successful upload of a new manifest file
         * because this constitutes an object update and the older file should remain available
         * until the new file can be downloaded.
         */
        if (!leaveSegments) {
            ObjectMetadata existingMetadata;
            String manifestDLO = null;
            Boolean manifestSLO = Boolean.FALSE;

            try {
                existingMetadata = getObjectMetaData(region, container, name);

                if (existingMetadata.getMetaData().containsKey(Constants.MANIFEST_HEADER)) {
                    manifestDLO = existingMetadata.getMetaData().get(Constants.MANIFEST_HEADER);
                } else if (existingMetadata.getMetaData().containsKey(Constants.X_STATIC_LARGE_OBJECT)) {
                    JSONParser parser = new JSONParser();
                    String manifestSLOValue = existingMetadata.getMetaData()
                            .get(Constants.X_STATIC_LARGE_OBJECT);
                    manifestSLO = (Boolean) parser.parse(manifestSLOValue);
                }
            } catch (NotFoundException e) {
                /*
                 * Just means no object exists already, so continue
                 */
            } catch (ParseException e) {
                /*
                 * X_STATIC_LARGE_OBJECT header existed but failed to parse.
                 * If a static large object already exists this must be set to "true".
                 * If we got here then the X_STATIC_LARGE_OBJECT header existed, but failed
                 * to parse as a boolean, so fail upload as a precaution.
                 */
                return null;
            }

            if (manifestDLO != null) {
                /*
                 * We have found an existing dynamic large object, so use the prefix to get a list of
                 * existing objects. If we're putting up a new dlo, make sure the segment prefixes are
                 * different, then we can delete anything that's not in the new list if necessary.
                 */
                String oldContainer = manifestDLO.substring(0, manifestDLO.indexOf('/', 1));
                String oldPath = manifestDLO.substring(manifestDLO.indexOf('/', 1), manifestDLO.length());
                oldSegmentsToRemove = new HashMap<String, List<StorageObject>>();
                oldSegmentsToRemove.put(oldContainer, listObjects(region, oldContainer, oldPath));
            } else if (manifestSLO) {
                /*
                 * We have found an existing static large object, so grab the manifest data that
                 * details the existing segments; any segments no longer needed can be deleted later
                 */

            }
        }

        int segmentNumber = 1;
        long timeStamp = System.currentTimeMillis() / 1000L;
        String segmentBase = String.format("%s/%d/%d", segmentFolder, timeStamp, objectSize);

        /*
         * Create subInputStream from the OutputStream we will pass to the
         * HttpEntity for writing content.
         */
        final PipedInputStream contentInStream = new PipedInputStream(64 * 1024);
        final PipedOutputStream contentOutStream = new PipedOutputStream(contentInStream);
        SubInputStream segmentStream = new SubInputStream(contentInStream, actualSegmentSize, false);

        /*
         * Fork the call to entity.writeTo() that allows us to grab any exceptions raised
         */
        final HttpEntity e = entity;

        final Callable<Boolean> writer = new Callable<Boolean>() {
            public Boolean call() throws Exception {
                e.writeTo(contentOutStream);
                return Boolean.TRUE;
            }
        };

        ExecutorService writeExecutor = Executors.newSingleThreadExecutor();
        final Future<Boolean> future = writeExecutor.submit(writer);
        /*
         * Check the future for exceptions after we've finished uploading segments
         */

        Map<String, List<StorageObject>> newSegmentsAdded = new HashMap<String, List<StorageObject>>();
        List<StorageObject> newSegments = new LinkedList<StorageObject>();
        JSONArray manifestSLO = new JSONArray();
        boolean finished = false;

        /*
         * Upload each segment of the file by reading sections of the content input stream
         * until the entire underlying stream is complete
         */
        while (!finished) {
            String segmentName = String.format("%s/%08d", segmentBase, segmentNumber);

            String etag;
            boolean error = false;
            try {
                etag = storeObject(region, segmentContainer, segmentStream, "application/octet-stream",
                        segmentName, new HashMap<String, String>());
            } catch (IOException ex) {
                // Storing this segment failed; log and rethrow
                System.out.println("Caught IO Exception: " + ex.getMessage());
                ex.printStackTrace();
                throw ex;
            }
            String segmentPath = segmentContainer + "/" + segmentName;
            long bytesUploaded = segmentStream.getBytesProduced();

            /*
             * Create the appropriate manifest structure if we're making a static large
             * object.
             *
             *   ETAG returned by the simple upload
             *   total size of segment uploaded
             *   path of segment
             */
            if (!dynamicLargeObject) {
                JSONObject segmentJSON = new JSONObject();

                segmentJSON.put("path", segmentPath);
                segmentJSON.put("etag", etag);
                segmentJSON.put("size_bytes", bytesUploaded);
                manifestSLO.add(segmentJSON);

                newSegments.add(new StorageObject(segmentName));
            }

            segmentNumber++;
            if (!finished) {
                finished = segmentStream.endSourceReached();
            }
            newSegmentsAdded.put(segmentContainer, newSegments);
            System.out.println("JSON: " + manifestSLO.toString());
            if (error)
                return "";

            segmentStream.readMoreBytes(actualSegmentSize);
        }

        /*
         * Attempts to retrieve the return value from the write operation
         * Any exceptions raised can then be handled appropriately
         */
        try {
            future.get();
        } catch (InterruptedException ex) {
            /*
             * The write was interrupted... delete the segments?
             */
        } catch (ExecutionException ex) {
            /*
             * This should always be an IOException or a RuntimeException
             * because the call to entity.writeTo() only throws IOException
             */
            Throwable t = ex.getCause();

            if (t instanceof IOException) {
                throw (IOException) t;
            } else {
                throw (RuntimeException) t;
            }
        }

        /*
         * Create an appropriate manifest depending on our DLO/SLO choice
         */
        String manifestEtag = null;
        if (dynamicLargeObject) {
            /*
             * Empty manifest with header detailing the shared prefix of object segments
             */
            long manifestTimeStamp = System.currentTimeMillis() / 1000L;
            metadata.put("X-Object-Manifest", segmentBase);
            metadata.put("x-object-meta-mtime", String.format("%s", manifestTimeStamp));
            manifestEtag = storeObject(region, container, new ByteArrayInputStream(new byte[0]),
                    entity.getContentType().getValue(), name, metadata);
        } else {
            /*
             * Manifest containing json list specifying details of the object segments.
             */
            URIBuilder urlBuild = new URIBuilder(region.getStorageUrl(container, name));
            urlBuild.setParameter("multipart-manifest", "put");
            URI url;
            try {
                url = urlBuild.build();
                String manifestContent = manifestSLO.toString();
                InputStreamEntity manifestEntity = new InputStreamEntity(
                        new ByteArrayInputStream(manifestContent.getBytes()), -1);
                manifestEntity.setChunked(true);
                manifestEntity.setContentType(entity.getContentType());
                HttpPut method = new HttpPut(url);
                method.setEntity(manifestEntity);
                method.setHeader("x-static-large-object", "true");
                Response response = this.execute(method, new DefaultResponseHandler());
                if (response.getStatusCode() == HttpStatus.SC_CREATED) {
                    manifestEtag = response.getResponseHeader(HttpHeaders.ETAG).getValue();
                } else {
                    throw new GenericException(response);
                }
            } catch (URISyntaxException ex) {
                ex.printStackTrace();
            }
        }

        /*
         * Delete stale segments of overwritten large object if requested.
         */
        if (!leaveSegments) {
            /*
             * Before deleting old segments, remove any objects from the delete list
             * that are also part of a new static large object that were updated during the upload.
             */
            if (!(oldSegmentsToRemove == null)) {
                for (String c : oldSegmentsToRemove.keySet()) {
                    List<StorageObject> rmv = oldSegmentsToRemove.get(c);
                    if (newSegmentsAdded.containsKey(c)) {
                        rmv.removeAll(newSegmentsAdded.get(c));
                    }
                    List<String> rmvNames = new LinkedList<String>();
                    for (StorageObject s : rmv) {
                        rmvNames.add(s.getName());
                    }
                    deleteObjects(region, c, rmvNames);
                }
            }
        }

        return manifestEtag;
    }
}
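
A hedged caller sketch for the segmented path (the client and region variables, the container name, and the file are illustrative; FileEntity and ContentType are stock Apache HttpClient classes): supplying a segmentSize forces large-object handling, while null for the optional arguments selects the defaults described in the javadoc above.

    File source = new File("backup.tar");
    HttpEntity entity = new FileEntity(source, ContentType.APPLICATION_OCTET_STREAM);

    // 1 GiB segments; null segment container/folder fall back to the defaults,
    // and leaveSegments = false cleans up segments of any overwritten object.
    String etag = client.storeObject(region, "backups", source.getName(), entity,
            new HashMap<String, String>(), null, source.length(),
            1024L * 1024L * 1024L, Boolean.FALSE, null, null, Boolean.FALSE);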

From source file:net.pms.dlna.DLNAResource.java

/**
 * Returns an InputStream of this DLNAResource that starts at a given time, if possible. Very useful if video chapters are being used.
 * @param range
 * @param mediarenderer
 * @return The input stream
 * @throws IOException
 */
public InputStream getInputStream(Range range, RendererConfiguration mediarenderer) throws IOException {
    logger.trace("Asked stream chunk : " + range + " of " + getName() + " and player " + getPlayer());

    // shagrath: small fix, regression on chapters
    boolean timeseek_auto = false;
    // Ditlew - WDTV Live
    // Ditlew - We convert byteoffset to timeoffset here. This needs the stream to be CBR!
    int cbr_video_bitrate = mediarenderer.getCBRVideoBitrate();
    long low = range.isByteRange() && range.isStartOffsetAvailable() ? range.asByteRange().getStart() : 0;
    long high = range.isByteRange() && range.isEndLimitAvailable() ? range.asByteRange().getEnd() : -1;
    Range.Time timeRange = range.createTimeRange();

    if (getPlayer() != null && low > 0 && cbr_video_bitrate > 0) {
        int used_bit_rated = (int) ((cbr_video_bitrate + 256) * 1024 / 8 * 1.04); // 1.04 = container overhead
        if (low > used_bit_rated) {
            timeRange.setStart((double) (low / (used_bit_rated)));
            low = 0;

            // WDTV Live - if set to TS it asks multiple times and ends by
            // asking for an invalid offset which kills MEncoder
            if (timeRange.getStartOrZero() > getMedia().getDurationInSeconds()) {
                return null;
            }

            // Should we rewind a little (in case our overhead isn't accurate enough)
            int rewind_secs = mediarenderer.getByteToTimeseekRewindSeconds();
            timeRange.rewindStart(rewind_secs);

            // shagrath:
            timeseek_auto = true;
        }
    }
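
    /*
     * Worked example with illustrative numbers: for cbr_video_bitrate = 4000 kbit/s,
     * used_bit_rated = (4000 + 256) * 1024 / 8 * 1.04 ~= 566,558 bytes per second,
     * so a byte offset of ~56.7 MB maps to a start time of roughly 100 seconds.
     */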

    // determine source of the stream
    if (getPlayer() == null) {
        // no transcoding
        if (this instanceof IPushOutput) {
            PipedOutputStream out = new PipedOutputStream();
            InputStream fis = new PipedInputStream(out);
            ((IPushOutput) this).push(out);

            if (low > 0) {
                fis.skip(low);
            }
            // http://www.ps3mediaserver.org/forum/viewtopic.php?f=11&t=12035
            fis = wrap(fis, high, low);

            return fis;
        }

        InputStream fis;
        if (getFormat() != null && getFormat().isImage() && getMedia() != null
                && getMedia().getOrientation() > 1 && mediarenderer.isAutoRotateBasedOnExif()) {
            // seems it's a jpeg file with an orientation setting to take care of
            fis = ImagesUtil.getAutoRotateInputStreamImage(getInputStream(), getMedia().getOrientation());
            if (fis == null) { // error, let's return the original one
                fis = getInputStream();
            }
        } else {
            fis = getInputStream();
        }

        if (fis != null) {
            if (low > 0) {
                fis.skip(low);
            }

            // http://www.ps3mediaserver.org/forum/viewtopic.php?f=11&t=12035
            fis = wrap(fis, high, low);

            if (timeRange.getStartOrZero() > 0 && this instanceof RealFile) {
                fis.skip(MpegUtil.getPositionForTimeInMpeg(((RealFile) this).getFile(),
                        (int) timeRange.getStartOrZero()));
            }
        }
        return fis;
    } else {
        // pipe transcoding result
        OutputParams params = new OutputParams(configuration);
        params.aid = getMediaAudio();
        params.sid = getMediaSubtitle();
        params.mediaRenderer = mediarenderer;
        timeRange.limit(getSplitRange());
        params.timeseek = timeRange.getStartOrZero();
        params.timeend = timeRange.getEndOrZero();
        params.shift_scr = timeseek_auto;

        if (this instanceof IPushOutput) {
            params.stdin = (IPushOutput) this;
        }

        // (re)start transcoding process if necessary
        if (externalProcess == null || externalProcess.isDestroyed()) {
            // first playback attempt => start new transcoding process
            logger.info("Starting transcode/remux of " + getName());
            externalProcess = getPlayer().launchTranscode(this, getMedia(), params);
            if (params.waitbeforestart > 0) {
                logger.trace("Sleeping for {} milliseconds", params.waitbeforestart);
                try {
                    Thread.sleep(params.waitbeforestart);
                } catch (InterruptedException e) {
                    logger.error(null, e);
                }
                logger.trace("Finished sleeping for " + params.waitbeforestart + " milliseconds");
            }
        } else if (params.timeseek > 0 && getMedia() != null && getMedia().isMediaparsed()
                && getMedia().getDurationInSeconds() > 0) {
            // time seek request => stop running transcode process and start new one
            logger.debug("Requesting time seek: " + params.timeseek + " seconds");
            params.minBufferSize = 1;
            Runnable r = new Runnable() {
                @Override
                public void run() {
                    externalProcess.stopProcess();
                }
            };
            new Thread(r, "External Process Stopper").start();
            ProcessWrapper newExternalProcess = getPlayer().launchTranscode(this, getMedia(), params);
            try {
                Thread.sleep(1000);
            } catch (InterruptedException e) {
                logger.error(null, e);
            }
            if (newExternalProcess == null) {
                logger.trace("External process instance is null... sounds not good");
            }
            externalProcess = newExternalProcess;
        }
        if (externalProcess == null) {
            return null;
        }
        InputStream is = null;
        int timer = 0;
        while (is == null && timer < 10) {
            is = externalProcess.getInputStream(low);
            timer++;
            if (is == null) {
                logger.warn("External input stream instance is null... sounds not good, waiting 500ms");
                try {
                    Thread.sleep(500);
                } catch (InterruptedException e) {
                }
            }
        }

        // fail fast: don't leave a process running indefinitely if it's
        // not producing output after params.waitbeforestart milliseconds + 5 seconds
        // this cleans up lingering MEncoder web video transcode processes that hang
        // instead of exiting
        if (is == null && externalProcess != null && !externalProcess.isDestroyed()) {
            Runnable r = new Runnable() {
                @Override
                public void run() {
                    logger.error("External input stream instance is null... stopping process");
                    externalProcess.stopProcess();
                }
            };
            new Thread(r, "Hanging External Process Stopper").start();
        }
        return is;
    }
}